diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPluginFuncTest.groovy
new file mode 100644
index 0000000000000..4c542d371c32c
--- /dev/null
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPluginFuncTest.groovy
@@ -0,0 +1,132 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc
+
+import org.elasticsearch.gradle.fixtures.AbstractGradleInternalPluginFuncTest
+import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin
+import org.gradle.testkit.runner.TaskOutcome
+
+class DocsTestPluginFuncTest extends AbstractGradleInternalPluginFuncTest {
+    Class pluginClassUnderTest = DocsTestPlugin.class
+
+    def setup() {
+        File docDir = new File(projectDir, 'doc');
+        docDir.mkdirs()
+        addSampleDoc(docDir)
+        buildApiRestrictionsDisabled = true
+        configurationCacheCompatible = false;
+        buildFile << """
+tasks.named('listSnippets') {
+    docs = fileTree('doc')
+}
+
+tasks.named('listConsoleCandidates') {
+    docs = fileTree('doc')
+}
+"""
+    }
+
+    def "can list snippets"() {
+        when:
+        def result = gradleRunner("listSnippets").build()
+        then:
+        result.task(":listSnippets").outcome == TaskOutcome.SUCCESS
+        assertOutputContains(result.output, """
+> Task :listSnippets
+mapper-annotated-text.asciidoc[37:39](Painless)
+mapper-annotated-text.asciidoc[42:44](js)
+mapper-annotated-text.asciidoc[51:69](console)// TEST[setup:seats]
+""")
+    }
+
+    def "can list console candidates"() {
+        when:
+        def result = gradleRunner("listConsoleCandidates").build()
+        then:
+        result.task(":listConsoleCandidates").outcome == TaskOutcome.SUCCESS
+        assertOutputContains(result.output, """
+> Task :listConsoleCandidates
+mapper-annotated-text.asciidoc[42:44](js)
+""")
+    }
+
+    void addSampleDoc(File docFolder) {
+        new File(docFolder, "mapper-annotated-text.asciidoc").text = """
+[[painless-filter-context]]
+=== Filter context
+
+Use a Painless script as a {ref}/query-dsl-script-query.html[filter] in a
+query to include and exclude documents.
+
+
+*Variables*
+
+`params` (`Map`, read-only)::
+        User-defined parameters passed in as part of the query.
+
+`doc` (`Map`, read-only)::
+        Contains the fields of the current document where each field is a
+        `List` of values.
+
+*Return*
+
+`boolean`::
+        Return `true` if the current document should be returned as a result of
+        the query, and `false` otherwise.
+
+
+*API*
+
+The standard <> is available.
+
+*Example*
+
+To run this example, first follow the steps in
+<>.
+
+This script finds all unsold documents that cost less than \$25.
+
+[source,Painless]
+----
+doc['sold'].value == false && doc['cost'].value < 25
+----
+
+[source,js]
+----
+curl 'hello world'
+----
+
+Defining `cost` as a script parameter enables the cost to be configured
+in the script query request. For example, the following request finds
+all available theatre seats for evening performances that are under \$25.
+ +[source,console] +---- +GET seats/_search +{ + "query": { + "bool": { + "filter": { + "script": { + "script": { + "source": "doc['sold'].value == false && doc['cost'].value < params.cost", + "params": { + "cost": 25 + } + } + } + } + } + } +} +---- +// TEST[setup:seats] +""" + } +} diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy deleted file mode 100644 index 38b4cb499eeb9..0000000000000 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal.doc - -import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask -import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask -import org.gradle.api.Action -import org.gradle.api.Plugin -import org.gradle.api.Project -import org.gradle.api.file.Directory -import org.gradle.api.file.ProjectLayout -import org.gradle.api.internal.file.FileOperations -import org.gradle.api.provider.Provider -import org.gradle.api.tasks.TaskProvider - -import javax.inject.Inject - -/** - * Sets up tests for documentation. - */ -class DocsTestPlugin implements Plugin { - - private FileOperations fileOperations - private ProjectLayout projectLayout - - @Inject - DocsTestPlugin(FileOperations fileOperations, ProjectLayout projectLayout) { - this.projectLayout = projectLayout - this.fileOperations = fileOperations - } - - @Override - void apply(Project project) { - project.pluginManager.apply('elasticsearch.legacy-yaml-rest-test') - - String distribution = System.getProperty('tests.distribution', 'default') - // The distribution can be configured with -Dtests.distribution on the command line - project.testClusters.matching { it.name.equals("yamlRestTest") }.configureEach { testDistribution = distribution.toUpperCase() } - project.testClusters.matching { it.name.equals("yamlRestTest") }.configureEach { nameCustomization = { it.replace("yamlRestTest", "node") } } - // Docs are published separately so no need to assemble - project.tasks.named("assemble").configure {enabled = false } - Map commonDefaultSubstitutions = [ - /* These match up with the asciidoc syntax for substitutions but - * the values may differ. In particular {version} needs to resolve - * to the version being built for testing but needs to resolve to - * the last released version for docs. 
*/ - '\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(), - '\\{version_qualified\\}': VersionProperties.elasticsearch, - '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), - '\\{build_flavor\\}' : distribution, - '\\{build_type\\}' : OS.conditionalString().onWindows({"zip"}).onUnix({"tar"}).supply(), - ] - project.tasks.register('listSnippets', SnippetsTask) { - group 'Docs' - description 'List each snippet' - defaultSubstitutions = commonDefaultSubstitutions - perSnippet = new Action() { - @Override - void execute(SnippetsTask.Snippet snippet) { - println(snippet.toString()) - } - } - } - project.tasks.register('listConsoleCandidates', SnippetsTask) { - group 'Docs' - description - 'List snippets that probably should be marked // CONSOLE' - defaultSubstitutions = commonDefaultSubstitutions - perSnippet = new Action() { - @Override - void execute(SnippetsTask.Snippet snippet) { - if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) { - println(it.toString()) - } - } - } - } - - Provider restRootDir = projectLayout.buildDirectory.dir("rest") - TaskProvider buildRestTests = project.tasks.register('buildRestTests', RestTestsFromSnippetsTask) { - defaultSubstitutions = commonDefaultSubstitutions - testRoot.convention(restRootDir) - doFirst { - getFileOperations().delete(testRoot.get()) - } - } - - // TODO: This effectively makes testRoot not customizable, which we don't do anyway atm - project.sourceSets.yamlRestTest.output.dir(restRootDir, builtBy: buildRestTests) - } -} diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy deleted file mode 100644 index 81207181dc9a7..0000000000000 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.internal.doc - -import groovy.transform.PackageScope -import org.elasticsearch.gradle.internal.doc.SnippetsTask.Snippet -import org.gradle.api.Action -import org.gradle.api.InvalidUserDataException -import org.gradle.api.file.DirectoryProperty -import org.gradle.api.internal.file.FileOperations -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.Internal -import org.gradle.api.tasks.OutputDirectory -import org.gradle.api.model.ObjectFactory - -import javax.inject.Inject; -import java.nio.file.Files -import java.nio.file.Path - -/** - * Generates REST tests for each snippet marked // TEST. - */ -abstract class RestTestsFromSnippetsTask extends SnippetsTask { - /** - * These languages aren't supported by the syntax highlighter so we - * shouldn't use them. - */ - private static final List BAD_LANGUAGES = ['json', 'javascript'] - - /** - * Test setups defined in the build instead of the docs so they can be - * shared between many doc files. - */ - @Input - Map setups = new HashMap() - - /** - * Test teardowns defined in the build instead of the docs so they can be - * shared between many doc files. 
- */ - @Input - Map teardowns = new HashMap() - - /** - * A list of files that contain snippets that *probably* should be - * converted to `// CONSOLE` but have yet to be converted. If a file is in - * this list and doesn't contain unconverted snippets this task will fail. - * If there are unconverted snippets not in this list then this task will - * fail. All files are paths relative to the docs dir. - */ - @Input - List expectedUnconvertedCandidates = [] - - /** - * Root directory of the tests being generated. To make rest tests happy - * we generate them in a testRoot which is contained in this directory. - */ - private DirectoryProperty testRoot - - @Internal - Set names = new HashSet<>() - - @Inject - abstract FileOperations getFileOperations(); - - @Inject - RestTestsFromSnippetsTask(ObjectFactory objectFactory) { - testRoot = objectFactory.directoryProperty() - TestBuilder builder = new TestBuilder() - perSnippet = new Action() { - @Override - void execute(Snippet snippet) { - builder.handleSnippet(snippet) - } - } - doLast { - builder.checkUnconverted() - builder.finishLastTest() - } - } - - /** - * Root directory containing all the files generated by this task. It is - * contained within testRoot. - */ - File outputRoot() { - return new File(testRoot.get().asFile, '/rest-api-spec/test') - } - - @OutputDirectory - DirectoryProperty getTestRoot() { - return testRoot - } -/** - * Is this snippet a candidate for conversion to `// CONSOLE`? - */ - static isConsoleCandidate(Snippet snippet) { - /* Snippets that are responses or already marked as `// CONSOLE` or - * `// NOTCONSOLE` are not candidates. */ - if (snippet.console != null || snippet.testResponse) { - return false - } - /* js snippets almost always should be marked with `// CONSOLE`. js - * snippets that shouldn't be marked `// CONSOLE`, like examples for - * js client, should always be marked with `// NOTCONSOLE`. - * - * `sh` snippets that contain `curl` almost always should be marked - * with `// CONSOLE`. In the exceptionally rare cases where they are - * not communicating with Elasticsearch, like the examples in the ec2 - * and gce discovery plugins, the snippets should be marked - * `// NOTCONSOLE`. */ - return snippet.language == 'js' || snippet.curl - } - - /** - * Certain requests should not have the shard failure check because the - * format of the response is incompatible i.e. it is not a JSON object. - */ - static shouldAddShardFailureCheck(String path) { - return path.startsWith('_cat') == false && path.startsWith('_ml/datafeeds/') == false - } - - /** - * Converts Kibana's block quoted strings into standard JSON. These - * {@code """} delimited strings can be embedded in CONSOLE and can - * contain newlines and {@code "} without the normal JSON escaping. - * This has to add it. - */ - @PackageScope - static String replaceBlockQuote(String body) { - int start = body.indexOf('"""'); - if (start < 0) { - return body - } - /* - * 1.3 is a fairly wild guess of the extra space needed to hold - * the escaped string. 
- */ - StringBuilder result = new StringBuilder((int) (body.length() * 1.3)); - int startOfNormal = 0; - while (start >= 0) { - int end = body.indexOf('"""', start + 3); - if (end < 0) { - throw new InvalidUserDataException( - "Invalid block quote starting at $start in:\n$body") - } - result.append(body.substring(startOfNormal, start)); - result.append('"'); - result.append(body.substring(start + 3, end) - .replace('"', '\\"') - .replace("\n", "\\n")); - result.append('"'); - startOfNormal = end + 3; - start = body.indexOf('"""', startOfNormal); - } - result.append(body.substring(startOfNormal)); - return result.toString(); - } - - private class TestBuilder { - private static final String SYNTAX = { - String method = /(?GET|PUT|POST|HEAD|OPTIONS|DELETE)/ - String pathAndQuery = /(?[^\n]+)/ - String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#/ - String body = /(?(?:\n(?!$badBody)[^\n]+)+)/ - String rawRequest = /(?:$method\s+$pathAndQuery$body?)/ - String yamlRequest = /(?:startyaml(?s)(?.+?)(?-s)endyaml)/ - String nonComment = /(?:$rawRequest|$yamlRequest)/ - String comment = /(?#.+)/ - /(?:$comment|$nonComment)\n+/ - }() - - /** - * The file in which we saw the last snippet that made a test. - */ - Path lastDocsPath - - /** - * The file we're building. - */ - PrintWriter current - - /** - * Files containing all snippets that *probably* should be converted - * to `// CONSOLE` but have yet to be converted. All files are paths - * relative to the docs dir. - */ - Set unconvertedCandidates = new HashSet<>() - - /** - * The last non-TESTRESPONSE snippet. - */ - Snippet previousTest - - /** - * Called each time a snippet is encountered. Tracks the snippets and - * calls buildTest to actually build the test. - */ - - void handleSnippet(Snippet snippet) { - if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) { - unconvertedCandidates.add(snippet.path.toString() - .replace('\\', '/')) - } - if (BAD_LANGUAGES.contains(snippet.language)) { - throw new InvalidUserDataException( - "$snippet: Use `js` instead of `${snippet.language}`.") - } - if (snippet.testSetup) { - testSetup(snippet) - previousTest = snippet - return - } - if (snippet.testTearDown) { - testTearDown(snippet) - previousTest = snippet - return - } - if (snippet.testResponse || snippet.language == 'console-result') { - if (previousTest == null) { - throw new InvalidUserDataException("$snippet: No paired previous test") - } - if (previousTest.path != snippet.path) { - throw new InvalidUserDataException("$snippet: Result can't be first in file") - } - response(snippet) - return - } - if ((snippet.language == 'js') && (snippet.console)) { - throw new InvalidUserDataException( - "$snippet: Use `[source,console]` instead of `// CONSOLE`.") - } - if (snippet.test || snippet.language == 'console') { - test(snippet) - previousTest = snippet - return - } - // Must be an unmarked snippet.... - } - - private void test(Snippet test) { - setupCurrent(test) - - if (test.continued) { - /* Catch some difficult to debug errors with // TEST[continued] - * and throw a helpful error message. 
*/ - if (previousTest == null || previousTest.path != test.path) { - throw new InvalidUserDataException("// TEST[continued] " + - "cannot be on first snippet in a file: $test") - } - if (previousTest != null && previousTest.testSetup) { - throw new InvalidUserDataException("// TEST[continued] " + - "cannot immediately follow // TESTSETUP: $test") - } - if (previousTest != null && previousTest.testTearDown) { - throw new InvalidUserDataException("// TEST[continued] " + - "cannot immediately follow // TEARDOWN: $test") - } - } else { - current.println('---') - if (test.name != null && test.name.isBlank() == false) { - if(names.add(test.name) == false) { - throw new InvalidUserDataException("Duplicated snippet name '$test.name': $test") - } - current.println("\"$test.name\":") - } else { - current.println("\"line_$test.start\":") - } - /* The Elasticsearch test runner doesn't support quite a few - * constructs unless we output this skip. We don't know if - * we're going to use these constructs, but we might so we - * output the skip just in case. */ - current.println(" - skip:") - current.println(" features: ") - current.println(" - default_shards") - current.println(" - stash_in_key") - current.println(" - stash_in_path") - current.println(" - stash_path_replace") - current.println(" - warnings") - } - if (test.skip) { - if (test.continued) { - throw new InvalidUserDataException("Continued snippets " - + "can't be skipped") - } - current.println(" - always_skip") - current.println(" reason: $test.skip") - } - if (test.setup != null) { - setup(test) - } - - body(test, false) - - if (test.teardown != null) { - teardown(test) - } - } - - private void setup(final Snippet snippet) { - // insert a setup defined outside of the docs - for (final String name : snippet.setup.split(',')) { - final String setup = setups[name] - if (setup == null) { - throw new InvalidUserDataException( - "Couldn't find named setup $name for $snippet" - ) - } - current.println("# Named setup ${name}") - current.println(setup) - } - } - - private void teardown(final Snippet snippet) { - // insert a teardown defined outside of the docs - for (final String name : snippet.teardown.split(',')) { - final String teardown = teardowns[name] - if (teardown == null) { - throw new InvalidUserDataException( - "Couldn't find named teardown $name for $snippet" - ) - } - current.println("# Named teardown ${name}") - current.println(teardown) - } - } - - private void response(Snippet response) { - if (null == response.skip) { - current.println(" - match: ") - current.println(" \$body: ") - replaceBlockQuote(response.contents).eachLine { - current.println(" $it") - } - } - } - - void emitDo(String method, String pathAndQuery, String body, - String catchPart, List warnings, boolean inSetup, boolean skipShardFailures) { - def (String path, String query) = pathAndQuery.tokenize('?') - if (path == null) { - path = '' // Catch requests to the root... - } else { - path = path.replace('<', '%3C').replace('>', '%3E') - } - current.println(" - do:") - if (catchPart != null) { - current.println(" catch: $catchPart") - } - if (false == warnings.isEmpty()) { - current.println(" warnings:") - for (String warning in warnings) { - // Escape " because we're going to quote the warning - String escaped = warning.replaceAll('"', '\\\\"') - /* Quote the warning in case it starts with [ which makes - * it look too much like an array. 
*/ - current.println(" - \"$escaped\"") - } - } - current.println(" raw:") - current.println(" method: $method") - current.println(" path: \"$path\"") - if (query != null) { - for (String param: query.tokenize('&')) { - def (String name, String value) = param.tokenize('=') - if (value == null) { - value = '' - } - current.println(" $name: \"$value\"") - } - } - if (body != null) { - // Throw out the leading newline we get from parsing the body - body = body.substring(1) - // Replace """ quoted strings with valid json ones - body = replaceBlockQuote(body) - current.println(" body: |") - body.eachLine { current.println(" $it") } - } - /* Catch any shard failures. These only cause a non-200 response if - * no shard succeeds. But we need to fail the tests on all of these - * because they mean invalid syntax or broken queries or something - * else that we don't want to teach people to do. The REST test - * framework doesn't allow us to have assertions in the setup - * section so we have to skip it there. We also omit the assertion - * from APIs that don't return a JSON object - */ - if (false == inSetup && skipShardFailures == false && shouldAddShardFailureCheck(path)) { - current.println(" - is_false: _shards.failures") - } - } - - private void testSetup(Snippet snippet) { - if (lastDocsPath == snippet.path) { - throw new InvalidUserDataException("$snippet: wasn't first. TESTSETUP can only be used in the first snippet of a document.") - } - setupCurrent(snippet) - current.println('---') - current.println("setup:") - if (snippet.setup != null) { - setup(snippet) - } - body(snippet, true) - } - - private void testTearDown(Snippet snippet) { - if (previousTest != null && previousTest.testSetup == false && lastDocsPath == snippet.path) { - throw new InvalidUserDataException("$snippet must follow test setup or be first") - } - setupCurrent(snippet) - current.println('---') - current.println('teardown:') - body(snippet, true) - } - - private void body(Snippet snippet, boolean inSetup) { - parse("$snippet", snippet.contents, SYNTAX) { matcher, last -> - if (matcher.group("comment") != null) { - // Comment - return - } - String yamlRequest = matcher.group("yaml"); - if (yamlRequest != null) { - current.println(yamlRequest) - return - } - String method = matcher.group("method") - String pathAndQuery = matcher.group("pathAndQuery") - String body = matcher.group("body") - String catchPart = last ? 
snippet.catchPart : null - if (pathAndQuery.startsWith('/')) { - // Leading '/'s break the generated paths - pathAndQuery = pathAndQuery.substring(1) - } - emitDo(method, pathAndQuery, body, catchPart, snippet.warnings, - inSetup, snippet.skipShardsFailures) - } - } - - private PrintWriter setupCurrent(Snippet test) { - if (lastDocsPath == test.path) { - return - } - names.clear() - finishLastTest() - lastDocsPath = test.path - - // Make the destination file: - // Shift the path into the destination directory tree - Path dest = outputRoot().toPath().resolve(test.path) - // Replace the extension - String fileName = dest.getName(dest.nameCount - 1) - dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yml')) - - // Now setup the writer - Files.createDirectories(dest.parent) - current = dest.newPrintWriter('UTF-8') - } - - void finishLastTest() { - if (current != null) { - current.close() - current = null - } - } - - void checkUnconverted() { - List listedButNotFound = [] - for (String listed : expectedUnconvertedCandidates) { - if (false == unconvertedCandidates.remove(listed)) { - listedButNotFound.add(listed) - } - } - String message = "" - if (false == listedButNotFound.isEmpty()) { - Collections.sort(listedButNotFound) - listedButNotFound = listedButNotFound.collect {' ' + it} - message += "Expected unconverted snippets but none found in:\n" - message += listedButNotFound.join("\n") - } - if (false == unconvertedCandidates.isEmpty()) { - List foundButNotListed = - new ArrayList<>(unconvertedCandidates) - Collections.sort(foundButNotListed) - foundButNotListed = foundButNotListed.collect {' ' + it} - if (false == "".equals(message)) { - message += "\n" - } - message += "Unexpected unconverted snippets:\n" - message += foundButNotListed.join("\n") - } - if (false == "".equals(message)) { - throw new InvalidUserDataException(message); - } - } - } -} diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy deleted file mode 100644 index 3e4ad91024082..0000000000000 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.internal.doc - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.JsonToken - -import org.gradle.api.Action; -import org.gradle.api.DefaultTask -import org.gradle.api.InvalidUserDataException -import org.gradle.api.file.ConfigurableFileTree -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.Internal -import org.gradle.api.tasks.TaskAction - -import java.nio.file.Path -import java.util.regex.Matcher - -/** - * A task which will run a closure on each snippet in the documentation. 
- */ -class SnippetsTask extends DefaultTask { - private static final String SCHAR = /(?:\\\/|[^\/])/ - private static final String SUBSTITUTION = /s\/($SCHAR+)\/($SCHAR*)\// - private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/ - private static final String SKIP_REGEX = /skip:([^\]]+)/ - private static final String SETUP = /setup:([^ \]]+)/ - private static final String TEARDOWN = /teardown:([^ \]]+)/ - private static final String WARNING = /warning:(.+)/ - private static final String NON_JSON = /(non_json)/ - private static final String TEST_SYNTAX = - /(?:$CATCH|$SUBSTITUTION|$SKIP_REGEX|(continued)|$SETUP|$TEARDOWN|$WARNING|(skip_shard_failures)) ?/ - - /** - * Action to take on each snippet. Called with a single parameter, an - * instance of Snippet. - */ - @Internal - Action perSnippet - - /** - * The docs to scan. Defaults to every file in the directory exception the - * build.gradle file because that is appropriate for Elasticsearch's docs - * directory. - */ - @InputFiles - ConfigurableFileTree docs - - /** - * Substitutions done on every snippet's contents. - */ - @Input - Map defaultSubstitutions = [:] - - @TaskAction - void executeTask() { - /* - * Walks each line of each file, building snippets as it encounters - * the lines that make up the snippet. - */ - for (File file: docs) { - String lastLanguage - String name - int lastLanguageLine - Snippet snippet = null - StringBuilder contents = null - List substitutions = null - Closure emit = { - snippet.contents = contents.toString() - contents = null - Closure doSubstitution = { String pattern, String subst -> - /* - * $body is really common but it looks like a - * backreference so we just escape it here to make the - * tests cleaner. - */ - subst = subst.replace('$body', '\\$body') - subst = subst.replace('$_path', '\\$_path') - // \n is a new line.... - subst = subst.replace('\\n', '\n') - snippet.contents = snippet.contents.replaceAll( - pattern, subst) - } - defaultSubstitutions.each doSubstitution - if (substitutions != null) { - substitutions.each doSubstitution - substitutions = null - } - if (snippet.language == null) { - throw new InvalidUserDataException("$snippet: " - + "Snippet missing a language. This is required by " - + "Elasticsearch's doc testing infrastructure so we " - + "be sure we don't accidentally forget to test a " - + "snippet.") - } - // Try to detect snippets that contain `curl` - if (snippet.language == 'sh' || snippet.language == 'shell') { - snippet.curl = snippet.contents.contains('curl') - if (snippet.console == false && snippet.curl == false) { - throw new InvalidUserDataException("$snippet: " - + "No need for NOTCONSOLE if snippet doesn't " - + "contain `curl`.") - } - } - if (snippet.testResponse - && ('js' == snippet.language || 'console-result' == snippet.language) - && null == snippet.skip) { - String quoted = snippet.contents - // quote values starting with $ - .replaceAll(/([:,])\s*(\$[^ ,\n}]+)/, '$1 "$2"') - // quote fields starting with $ - .replaceAll(/(\$[^ ,\n}]+)\s*:/, '"$1":') - - JsonFactory jf = new JsonFactory(); - jf.configure(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER,true); - JsonParser jsonParser; - - try { - jsonParser = jf.createParser(quoted); - while(jsonParser.isClosed() == false) { - jsonParser.nextToken(); - } - } catch (JsonParseException e) { - throw new InvalidUserDataException("Invalid json in " - + snippet.toString() + ". 
The error is:\n" + e.getMessage() + ".\n" - + "After substitutions and munging, the json looks like:\n" + quoted, e); - } - } - perSnippet.execute(snippet) - snippet = null - } - file.eachLine('UTF-8') { String line, int lineNumber -> - Matcher matcher - if (line ==~ /-{4,}\s*/) { // Four dashes looks like a snippet - if (snippet == null) { - Path path = docs.dir.toPath().relativize(file.toPath()) - snippet = new Snippet(path: path, start: lineNumber, name: name) - if (lastLanguageLine == lineNumber - 1) { - snippet.language = lastLanguage - } - name = null - } else { - snippet.end = lineNumber - } - return - } - def source = matchSource(line) - if (source.matches) { - lastLanguage = source.language - lastLanguageLine = lineNumber - name = source.name - return - } - if (line ==~ /\/\/\s*AUTOSENSE\s*/) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "AUTOSENSE has been replaced by CONSOLE.") - } - if (line ==~ /\/\/\s*CONSOLE\s*/) { - if (snippet == null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "CONSOLE not paired with a snippet") - } - if (snippet.console != null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "Can't be both CONSOLE and NOTCONSOLE") - } - snippet.console = true - return - } - if (line ==~ /\/\/\s*NOTCONSOLE\s*/) { - if (snippet == null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "NOTCONSOLE not paired with a snippet") - } - if (snippet.console != null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "Can't be both CONSOLE and NOTCONSOLE") - } - snippet.console = false - return - } - matcher = line =~ /\/\/\s*TEST(\[(.+)\])?\s*/ - if (matcher.matches()) { - if (snippet == null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "TEST not paired with a snippet at ") - } - snippet.test = true - if (matcher.group(2) != null) { - String loc = "$file:$lineNumber" - parse(loc, matcher.group(2), TEST_SYNTAX) { - if (it.group(1) != null) { - snippet.catchPart = it.group(1) - return - } - if (it.group(2) != null) { - if (substitutions == null) { - substitutions = [] - } - substitutions.add([it.group(2), it.group(3)]) - return - } - if (it.group(4) != null) { - snippet.skip = it.group(4) - return - } - if (it.group(5) != null) { - snippet.continued = true - return - } - if (it.group(6) != null) { - snippet.setup = it.group(6) - return - } - if (it.group(7) != null) { - snippet.teardown = it.group(7) - return - } - if (it.group(8) != null) { - snippet.warnings.add(it.group(8)) - return - } - if (it.group(9) != null) { - snippet.skipShardsFailures = true - return - } - throw new InvalidUserDataException( - "Invalid test marker: $line") - } - } - return - } - matcher = line =~ /\/\/\s*TESTRESPONSE(\[(.+)\])?\s*/ - if (matcher.matches()) { - if (snippet == null) { - throw new InvalidUserDataException("$file:$lineNumber: " - + "TESTRESPONSE not paired with a snippet") - } - snippet.testResponse = true - if (matcher.group(2) != null) { - if (substitutions == null) { - substitutions = [] - } - String loc = "$file:$lineNumber" - parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$NON_JSON|$SKIP_REGEX) ?/) { - if (it.group(1) != null) { - // TESTRESPONSE[s/adsf/jkl/] - substitutions.add([it.group(1), it.group(2)]) - } else if (it.group(3) != null) { - // TESTRESPONSE[non_json] - substitutions.add(['^', '/']) - substitutions.add(['\n$', '\\\\s*/']) - substitutions.add(['( +)', '$1\\\\s+']) - substitutions.add(['\n', '\\\\s*\n ']) - } else if (it.group(4) != null) { - // 
TESTRESPONSE[skip:reason] - snippet.skip = it.group(4) - } - } - } - return - } - if (line ==~ /\/\/\s*TESTSETUP\s*/) { - snippet.testSetup = true - return - } - if (line ==~ /\/\/\s*TEARDOWN\s*/) { - snippet.testTearDown = true - return - } - if (snippet == null) { - // Outside - return - } - if (snippet.end == Snippet.NOT_FINISHED) { - // Inside - if (contents == null) { - contents = new StringBuilder() - } - // We don't need the annotations - line = line.replaceAll(/<\d+>/, '') - // Nor any trailing spaces - line = line.replaceAll(/\s+$/, '') - contents.append(line).append('\n') - return - } - // Allow line continuations for console snippets within lists - if (snippet != null && line.trim() == '+') { - return - } - // Just finished - emit() - } - if (snippet != null) emit() - } - } - - static Source matchSource(String line) { - def matcher = line =~ /\["?source"?(?:\.[^,]+)?,\s*"?([-\w]+)"?(,((?!id=).)*(id="?([-\w]+)"?)?(.*))?].*/ - if(matcher.matches()){ - return new Source(matches: true, language: matcher.group(1), name: matcher.group(5)) - } - return new Source(matches: false) - } - - static class Source { - boolean matches - String language - String name - } - - static class Snippet { - static final int NOT_FINISHED = -1 - - /** - * Path to the file containing this snippet. Relative to docs.dir of the - * SnippetsTask that created it. - */ - Path path - int start - int end = NOT_FINISHED - String contents - - Boolean console = null - boolean test = false - boolean testResponse = false - boolean testSetup = false - boolean testTearDown = false - String skip = null - boolean continued = false - String language = null - String catchPart = null - String setup = null - String teardown = null - boolean curl - List warnings = new ArrayList() - boolean skipShardsFailures = false - String name - - @Override - public String toString() { - String result = "$path[$start:$end]" - if (language != null) { - result += "($language)" - } - if (console != null) { - result += console ? '// CONSOLE' : '// NOTCONSOLE' - } - if (test) { - result += '// TEST' - if (catchPart) { - result += "[catch: $catchPart]" - } - if (skip) { - result += "[skip=$skip]" - } - if (continued) { - result += '[continued]' - } - if (setup) { - result += "[setup:$setup]" - } - if (teardown) { - result += "[teardown:$teardown]" - } - for (String warning in warnings) { - result += "[warning:$warning]" - } - if (skipShardsFailures) { - result += '[skip_shard_failures]' - } - } - if (testResponse) { - result += '// TESTRESPONSE' - if (skip) { - result += "[skip=$skip]" - } - } - if (testSetup) { - result += '// TESTSETUP' - } - if (curl) { - result += '(curl)' - } - return result - } - } - - /** - * Repeatedly match the pattern to the string, calling the closure with the - * matchers each time there is a match. If there are characters that don't - * match then blow up. If the closure takes two parameters then the second - * one is "is this the last match?". - */ - protected parse(String location, String s, String pattern, Closure c) { - if (s == null) { - return // Silly null, only real stuff gets to match! 
-        }
-        Matcher m = s =~ pattern
-        int offset = 0
-        Closure extraContent = { message ->
-            StringBuilder cutOut = new StringBuilder()
-            cutOut.append(s[offset - 6..offset - 1])
-            cutOut.append('*')
-            cutOut.append(s[offset..Math.min(offset + 5, s.length() - 1)])
-            String cutOutNoNl = cutOut.toString().replace('\n', '\\n')
-            throw new InvalidUserDataException("$location: Extra content "
-                + "$message ('$cutOutNoNl') matching [$pattern]: $s")
-        }
-        while (m.find()) {
-            if (m.start() != offset) {
-                extraContent("between [$offset] and [${m.start()}]")
-            }
-            offset = m.end()
-            if (c.maximumNumberOfParameters == 1) {
-                c(m)
-            } else {
-                c(m, offset == s.length())
-            }
-        }
-        if (offset == 0) {
-            throw new InvalidUserDataException("$location: Didn't match "
-                + "$pattern: $s")
-        }
-        if (offset != s.length()) {
-            extraContent("after [$offset]")
-        }
-    }
-}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/AsciidocSnippetParser.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/AsciidocSnippetParser.java
new file mode 100644
index 0000000000000..7b35fd29fbd1a
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/AsciidocSnippetParser.java
@@ -0,0 +1,306 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import org.gradle.api.InvalidUserDataException;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class AsciidocSnippetParser implements SnippetParser {
+    public static final Pattern SNIPPET_PATTERN = Pattern.compile("-{4,}\\s*");
+
+    private static final String CATCH = "catch:\\s*((?:\\/[^\\/]+\\/)|[^ \\]]+)";
+    private static final String SKIP_REGEX = "skip:([^\\]]+)";
+    private static final String SETUP = "setup:([^ \\]]+)";
+    private static final String TEARDOWN = "teardown:([^ \\]]+)";
+    private static final String WARNING = "warning:(.+)";
+    private static final String NON_JSON = "(non_json)";
+    private static final String SCHAR = "(?:\\\\\\/|[^\\/])";
+    private static final String SUBSTITUTION = "s\\/(" + SCHAR + "+)\\/(" + SCHAR + "*)\\/";
+    private static final String TEST_SYNTAX = "(?:"
+        + CATCH
+        + "|"
+        + SUBSTITUTION
+        + "|"
+        + SKIP_REGEX
+        + "|(continued)|"
+        + SETUP
+        + "|"
+        + TEARDOWN
+        + "|"
+        + WARNING
+        + "|(skip_shard_failures)) ?";
+
+    private final Map<String, String> defaultSubstitutions;
+
+    public AsciidocSnippetParser(Map<String, String> defaultSubstitutions) {
+        this.defaultSubstitutions = defaultSubstitutions;
+    }
+
+    @Override
+    public List<Snippet> parseDoc(File rootDir, File docFile, List<Map.Entry<String, String>> substitutions) {
+        String lastLanguage = null;
+        Snippet snippet = null;
+        String name = null;
+        int lastLanguageLine = 0;
+        StringBuilder contents = null;
+        List<Snippet> snippets = new ArrayList<>();
+
+        try (Stream<String> lines = Files.lines(docFile.toPath(), StandardCharsets.UTF_8)) {
+            List<String> linesList = lines.collect(Collectors.toList());
+            for (int lineNumber = 0; lineNumber < linesList.size(); lineNumber++) {
+                String line = linesList.get(lineNumber);
+                if (SNIPPET_PATTERN.matcher(line).matches()) {
+                    if (snippet == null) {
+                        Path path = rootDir.toPath().relativize(docFile.toPath());
+                        snippet = new Snippet(path, lineNumber + 1, name);
+                        snippets.add(snippet);
+                        if (lastLanguageLine == lineNumber - 1) {
+                            snippet.language = lastLanguage;
+                        }
+                        name = null;
+                    } else {
+                        snippet.end = lineNumber + 1;
+                    }
+                    continue;
+                }
+
+                Source source = matchSource(line);
+                if (source.matches) {
+                    lastLanguage = source.language;
+                    lastLanguageLine = lineNumber;
+                    name = source.name;
+                    continue;
+                }
+                if (consoleHandled(docFile.getName(), lineNumber, line, snippet)) {
+                    continue;
+                }
+                if (testHandled(docFile.getName(), lineNumber, line, snippet, substitutions)) {
+                    continue;
+                }
+                if (testResponseHandled(docFile.getName(), lineNumber, line, snippet, substitutions)) {
+                    continue;
+                }
+                if (line.matches("\\/\\/\\s*TESTSETUP\\s*")) {
+                    snippet.testSetup = true;
+                    continue;
+                }
+                if (line.matches("\\/\\/\\s*TEARDOWN\\s*")) {
+                    snippet.testTearDown = true;
+                    continue;
+                }
+                if (snippet == null) {
+                    // Outside
+                    continue;
+                }
+                if (snippet.end == Snippet.NOT_FINISHED) {
+                    // Inside
+                    if (contents == null) {
+                        contents = new StringBuilder();
+                    }
+                    // We don't need the annotations
+                    line = line.replaceAll("<\\d+>", "");
+                    // Nor any trailing spaces
+                    line = line.replaceAll("\\s+$", "");
+                    contents.append(line).append("\n");
+                    continue;
+                }
+                // Allow line continuations for console snippets within lists
+                if (snippet != null && line.trim().equals("+")) {
+                    continue;
+                }
+                finalizeSnippet(snippet, contents.toString(), defaultSubstitutions, substitutions);
+                substitutions = new ArrayList<>();
+                snippet = null;
+                contents = null;
+            }
+            if (snippet != null) {
+                finalizeSnippet(snippet, contents.toString(), defaultSubstitutions, substitutions);
+                contents = null;
+                snippet = null;
+                substitutions = new ArrayList<>();
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        return snippets;
+    }
+
+    static Snippet finalizeSnippet(
+        final Snippet snippet,
+        String contents,
+        Map<String, String> defaultSubstitutions,
+        Collection<Map.Entry<String, String>> substitutions
+    ) {
+        snippet.contents = contents.toString();
+        snippet.validate();
+        escapeSubstitutions(snippet, defaultSubstitutions, substitutions);
+        return snippet;
+    }
+
+    private static void escapeSubstitutions(
+        Snippet snippet,
+        Map<String, String> defaultSubstitutions,
+        Collection<Map.Entry<String, String>> substitutions
+    ) {
+        BiConsumer<String, String> doSubstitution = (pattern, subst) -> {
+            /*
+             * $body is really common but it looks like a
+             * backreference so we just escape it here to make the
+             * tests cleaner.
+             */
+            subst = subst.replace("$body", "\\$body");
+            subst = subst.replace("$_path", "\\$_path");
+            subst = subst.replace("\\n", "\n");
+            snippet.contents = snippet.contents.replaceAll(pattern, subst);
+        };
+        defaultSubstitutions.forEach(doSubstitution);
+
+        if (substitutions != null) {
+            substitutions.forEach(e -> doSubstitution.accept(e.getKey(), e.getValue()));
+        }
+    }
+
+    private boolean testResponseHandled(
+        String name,
+        int lineNumber,
+        String line,
+        Snippet snippet,
+        final List<Map.Entry<String, String>> substitutions
+    ) {
+        Matcher matcher = Pattern.compile("\\/\\/\\s*TESTRESPONSE(\\[(.+)\\])?\\s*").matcher(line);
+        if (matcher.matches()) {
+            if (snippet == null) {
+                throw new InvalidUserDataException(name + ":" + lineNumber + ": TESTRESPONSE not paired with a snippet at ");
+            }
+            snippet.testResponse = true;
+            if (matcher.group(2) != null) {
+                String loc = name + ":" + lineNumber;
+                ParsingUtils.parse(
+                    loc,
+                    matcher.group(2),
+                    "(?:" + SUBSTITUTION + "|" + NON_JSON + "|" + SKIP_REGEX + ") ?",
+                    (Matcher m, Boolean last) -> {
+                        if (m.group(1) != null) {
+                            // TESTRESPONSE[s/adsf/jkl/]
+                            substitutions.add(Map.entry(m.group(1), m.group(2)));
+                        } else if (m.group(3) != null) {
+                            // TESTRESPONSE[non_json]
+                            substitutions.add(Map.entry("^", "/"));
+                            substitutions.add(Map.entry("\n$", "\\\\s*/"));
+                            substitutions.add(Map.entry("( +)", "$1\\\\s+"));
+                            substitutions.add(Map.entry("\n", "\\\\s*\n "));
+                        } else if (m.group(4) != null) {
+                            // TESTRESPONSE[skip:reason]
+                            snippet.skip = m.group(4);
+                        }
+                    }
+                );
+            }
+            return true;
+        }
+        return false;
+    }
+
+    private boolean testHandled(String name, int lineNumber, String line, Snippet snippet, List<Map.Entry<String, String>> substitutions) {
+        Matcher matcher = Pattern.compile("\\/\\/\\s*TEST(\\[(.+)\\])?\\s*").matcher(line);
+        if (matcher.matches()) {
+            if (snippet == null) {
+                throw new InvalidUserDataException(name + ":" + lineNumber + ": TEST not paired with a snippet at ");
+            }
+            snippet.test = true;
+            if (matcher.group(2) != null) {
+                String loc = name + ":" + lineNumber;
+                ParsingUtils.parse(loc, matcher.group(2), TEST_SYNTAX, (Matcher m, Boolean last) -> {
+                    if (m.group(1) != null) {
+                        snippet.catchPart = m.group(1);
+                        return;
+                    }
+                    if (m.group(2) != null) {
+                        substitutions.add(Map.entry(m.group(2), m.group(3)));
+                        return;
+                    }
+                    if (m.group(4) != null) {
+                        snippet.skip = m.group(4);
+                        return;
+                    }
+                    if (m.group(5) != null) {
+                        snippet.continued = true;
+                        return;
+                    }
+                    if (m.group(6) != null) {
+                        snippet.setup = m.group(6);
+                        return;
+                    }
+                    if (m.group(7) != null) {
+                        snippet.teardown = m.group(7);
+                        return;
+                    }
+                    if (m.group(8) != null) {
+                        snippet.warnings.add(m.group(8));
+                        return;
+                    }
+                    if (m.group(9) != null) {
+                        snippet.skipShardsFailures = true;
+                        return;
+                    }
+                    throw new InvalidUserDataException("Invalid test marker: " + line);
+                });
+            }
+            return true;
+        }
+        return false;
+    }
+
+    private boolean consoleHandled(String fileName, int lineNumber, String line, Snippet snippet) {
+        if (line.matches("\\/\\/\\s*CONSOLE\\s*")) {
+            if (snippet == null) {
+                throw new InvalidUserDataException(fileName + ":" + lineNumber + ": CONSOLE not paired with a snippet");
+            }
+            if (snippet.console != null) {
+                throw new InvalidUserDataException(fileName + ":" + lineNumber + ": Can't be both CONSOLE and NOTCONSOLE");
+            }
+            snippet.console = true;
+            return true;
+        } else if (line.matches("\\/\\/\\s*NOTCONSOLE\\s*")) {
+            if (snippet == null) {
+                throw new InvalidUserDataException(fileName + ":" + lineNumber + ": NOTCONSOLE not paired with a snippet");
+            }
+            if (snippet.console != null) {
+                throw new InvalidUserDataException(fileName + ":" + lineNumber + ": Can't be both CONSOLE and NOTCONSOLE");
+            }
+            snippet.console = false;
+            return true;
+        }
+        return false;
+    }
+
+    static Source matchSource(String line) {
+        Pattern pattern = Pattern.compile("\\[\"?source\"?(?:\\.[^,]+)?,\\s*\"?([-\\w]+)\"?(,((?!id=).)*(id=\"?([-\\w]+)\"?)?(.*))?].*");
+        Matcher matcher = pattern.matcher(line);
+        if (matcher.matches()) {
+            return new Source(true, matcher.group(1), matcher.group(5));
+        }
+        return new Source(false, null, null);
+    }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocSnippetTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocSnippetTask.java
new file mode 100644
index 0000000000000..87f0621d53fba
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocSnippetTask.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import org.apache.commons.collections.map.HashedMap;
+import org.gradle.api.Action;
+import org.gradle.api.DefaultTask;
+import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.file.ConfigurableFileTree;
+import org.gradle.api.tasks.Input;
+import org.gradle.api.tasks.InputFiles;
+import org.gradle.api.tasks.TaskAction;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class DocSnippetTask extends DefaultTask {
+
+    /**
+     * Action to take on each snippet. Called with a single parameter, an
+     * instance of Snippet.
+     */
+    private Action<Snippet> perSnippet;
+
+    /**
+     * The docs to scan. Defaults to every file in the directory except the
+     * build.gradle file because that is appropriate for Elasticsearch's docs
+     * directory.
+     */
+    private ConfigurableFileTree docs;
+    private Map<String, String> defaultSubstitutions = new HashedMap();
+
+    @InputFiles
+    public ConfigurableFileTree getDocs() {
+        return docs;
+    }
+
+    public void setDocs(ConfigurableFileTree docs) {
+        this.docs = docs;
+    }
+
+    /**
+     * Substitutions done on every snippet's contents.
+     */
+    @Input
+    public Map<String, String> getDefaultSubstitutions() {
+        return defaultSubstitutions;
+    }
+
+    @TaskAction
+    void executeTask() {
+        for (File file : docs) {
+            List<Snippet> snippets = parseDocFile(docs.getDir(), file, new ArrayList<>());
+            if (perSnippet != null) {
+                snippets.forEach(perSnippet::execute);
+            }
+        }
+    }
+
+    List<Snippet> parseDocFile(File rootDir, File docFile, List<Map.Entry<String, String>> substitutions) {
+        SnippetParser parser = parserForFileType(docFile);
+        return parser.parseDoc(rootDir, docFile, substitutions);
+    }
+
+    private SnippetParser parserForFileType(File docFile) {
+        if (docFile.getName().endsWith(".asciidoc")) {
+            return new AsciidocSnippetParser(defaultSubstitutions);
+        }
+        throw new InvalidUserDataException("Unsupported file type: " + docFile.getName());
+    }
+
+    public void setDefaultSubstitutions(Map<String, String> defaultSubstitutions) {
+        this.defaultSubstitutions = defaultSubstitutions;
+    }
+
+    public void setPerSnippet(Action<Snippet> perSnippet) {
+        this.perSnippet = perSnippet;
+    }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.java
new file mode 100644
index 0000000000000..bbb5102dd6699
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import org.elasticsearch.gradle.OS;
+import org.elasticsearch.gradle.Version;
+import org.elasticsearch.gradle.VersionProperties;
+import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
+import org.elasticsearch.gradle.testclusters.TestClustersPlugin;
+import org.elasticsearch.gradle.testclusters.TestDistribution;
+import org.gradle.api.NamedDomainObjectContainer;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.file.Directory;
+import org.gradle.api.file.ProjectLayout;
+import org.gradle.api.internal.file.FileOperations;
+import org.gradle.api.plugins.JavaPluginExtension;
+import org.gradle.api.provider.Provider;
+import org.gradle.api.tasks.TaskProvider;
+
+import java.util.Map;
+
+import javax.inject.Inject;
+
+public class DocsTestPlugin implements Plugin<Project> {
+    private FileOperations fileOperations;
+    private ProjectLayout projectLayout;
+
+    @Inject
+    DocsTestPlugin(FileOperations fileOperations, ProjectLayout projectLayout) {
+        this.projectLayout = projectLayout;
+        this.fileOperations = fileOperations;
+    }
+
+    @Override
+    public void apply(Project project) {
+        project.getPluginManager().apply("elasticsearch.legacy-yaml-rest-test");
+
+        String distribution = System.getProperty("tests.distribution", "default");
+        // The distribution can be configured with -Dtests.distribution on the command line
+        NamedDomainObjectContainer<ElasticsearchCluster> testClusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
+            .getExtensions()
+            .getByName(TestClustersPlugin.EXTENSION_NAME);
+
+        testClusters.matching((c) -> c.getName().equals("yamlRestTest")).configureEach(c -> {
+            c.setTestDistribution(TestDistribution.valueOf(distribution.toUpperCase()));
+            c.setNameCustomization((name) -> name.replace("yamlRestTest", "node"));
+        });
+
+        project.getTasks().named("assemble").configure(task -> { task.setEnabled(false); });
+
+        Map<String, String> commonDefaultSubstitutions = Map.of(
+            /* These match up with the asciidoc syntax for substitutions but
+             * the values may differ. In particular {version} needs to resolve
+             * to the version being built for testing but needs to resolve to
+             * the last released version for docs. */
+            "\\{version\\}",
+            Version.fromString(VersionProperties.getElasticsearch()).toString(),
+            "\\{version_qualified\\}",
+            VersionProperties.getElasticsearch(),
+            "\\{lucene_version\\}",
+            VersionProperties.getLucene().replaceAll("-snapshot-\\w+$", ""),
+            "\\{build_flavor\\}",
+            distribution,
+            "\\{build_type\\}",
+            OS.conditionalString().onWindows(() -> "zip").onUnix(() -> "tar").supply()
+        );
+
+        project.getTasks().register("listSnippets", DocSnippetTask.class, task -> {
+            task.setGroup("Docs");
+            task.setDescription("List each snippet");
+            task.setDefaultSubstitutions(commonDefaultSubstitutions);
+            task.setPerSnippet(snippet -> System.out.println(snippet));
+        });
+
+        project.getTasks().register("listConsoleCandidates", DocSnippetTask.class, task -> {
+            task.setGroup("Docs");
+            task.setDescription("List snippets that probably should be marked // CONSOLE");
+            task.setDefaultSubstitutions(commonDefaultSubstitutions);
+            task.setPerSnippet(snippet -> {
+                if (snippet.isConsoleCandidate()) {
+                    System.out.println(snippet);
+                }
+            });
+        });
+
+        Provider<Directory> restRootDir = projectLayout.getBuildDirectory().dir("rest");
+        TaskProvider<RestTestsFromDocSnippetTask> buildRestTests = project.getTasks()
+            .register("buildRestTests", RestTestsFromDocSnippetTask.class, task -> {
+                task.setDefaultSubstitutions(commonDefaultSubstitutions);
+                task.getTestRoot().convention(restRootDir);
+                task.doFirst(task1 -> fileOperations.delete(restRootDir.get()));
+            });
+
+        // TODO: This effectively makes testRoot not customizable, which we don't do anyway atm
+        JavaPluginExtension byType = project.getExtensions().getByType(JavaPluginExtension.class);
+        byType.getSourceSets().getByName("yamlRestTest").getOutput().dir(Map.of("builtBy", buildRestTests), restRootDir);
+    }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/ParsingUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/ParsingUtils.java
new file mode 100644
index 0000000000000..b17dd4c7e21d3
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/ParsingUtils.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import org.gradle.api.InvalidUserDataException;
+
+import java.util.function.BiConsumer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class ParsingUtils {
+
+    static void extraContent(String message, String content, int offset, String location, String pattern) {
+        StringBuilder cutOut = new StringBuilder();
+        cutOut.append(content.substring(offset - 6, offset));
+        cutOut.append('*');
+        cutOut.append(content.substring(offset, Math.min(offset + 5, content.length())));
+        String cutOutNoNl = cutOut.toString().replace("\n", "\\n");
+        throw new InvalidUserDataException(
+            location + ": Extra content " + message + " ('" + cutOutNoNl + "') matching [" + pattern + "]: " + content
+        );
+    }
+
+    /**
+     * Repeatedly match the pattern to the string, calling the handler with the
+     * matcher each time there is a match. If there are characters that don't
+     * match then blow up. The second argument passed to the handler is
+     * "is this the last match?".
+     */
+    static void parse(String location, String content, String pattern, BiConsumer<Matcher, Boolean> testHandler) {
+        if (content == null) {
+            return; // Silly null, only real stuff gets to match!
+        }
+        Matcher m = Pattern.compile(pattern).matcher(content);
+        int offset = 0;
+        while (m.find()) {
+            if (m.start() != offset) {
+                extraContent("between [" + offset + "] and [" + m.start() + "]", content, offset, location, pattern);
+            }
+            offset = m.end();
+            testHandler.accept(m, offset == content.length());
+        }
+        if (offset == 0) {
+            throw new InvalidUserDataException(location + ": Didn't match " + pattern + ": " + content);
+        }
+        if (offset != content.length()) {
+            extraContent("after [" + offset + "]", content, offset, location, pattern);
+        }
+    }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTask.java
new file mode 100644
index 0000000000000..c5b1d67627dd9
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTask.java
@@ -0,0 +1,526 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.gradle.internal.doc; + +import groovy.transform.PackageScope; + +import org.gradle.api.InvalidUserDataException; +import org.gradle.api.file.DirectoryProperty; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.OutputDirectory; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +public abstract class RestTestsFromDocSnippetTask extends DocSnippetTask { + + private Map setups = new HashMap<>(); + + private Map teardowns = new HashMap(); + + /** + * Test setups defined in the build instead of the docs so they can be + * shared between many doc files. + */ + @Input + public Map getSetups() { + return setups; + } + + public void setSetups(Map setups) { + this.setups = setups; + } + + /** + * Test teardowns defined in the build instead of the docs so they can be + * shared between many doc files. + */ + @Input + public Map getTeardowns() { + return teardowns; + } + + public void setTeardowns(Map teardowns) { + this.teardowns = teardowns; + } + + /** + * A list of files that contain snippets that *probably* should be + * converted to `// CONSOLE` but have yet to be converted. If a file is in + * this list and doesn't contain unconverted snippets this task will fail. + * If there are unconverted snippets not in this list then this task will + * fail. All files are paths relative to the docs dir. + */ + private List expectedUnconvertedCandidates; + + @Input + public List getExpectedUnconvertedCandidates() { + return expectedUnconvertedCandidates; + } + + public void setExpectedUnconvertedCandidates(List expectedUnconvertedCandidates) { + this.expectedUnconvertedCandidates = expectedUnconvertedCandidates; + } + + /** + * Root directory of the tests being generated. To make rest tests happy + * we generate them in a testRoot which is contained in this directory. + */ + private DirectoryProperty testRoot; + + private Set names = new HashSet<>(); + + @Internal + public Set getNames() { + return names; + } + + public void setNames(Set names) { + this.names = names; + } + + @Inject + public abstract FileOperations getFileOperations(); + + /** + * Root directory containing all the files generated by this task. It is + * contained within testRoot. + */ + @OutputDirectory + File getOutputRoot() { + return new File(testRoot.get().getAsFile(), "/rest-api-spec/test"); + } + + @OutputDirectory + DirectoryProperty getTestRoot() { + return testRoot; + } + + @Inject + public RestTestsFromDocSnippetTask(ObjectFactory objectFactory) { + testRoot = objectFactory.directoryProperty(); + TestBuilder builder = new TestBuilder(); + + setPerSnippet(snippet -> builder.handleSnippet(snippet)); + doLast(task -> { + builder.finishLastTest(); + builder.checkUnconverted(); + }); + } + + /** + * Certain requests should not have the shard failure check because the + * format of the response is incompatible i.e. it is not a JSON object. 
+    /**
+     * Certain requests should not have the shard failure check because the
+     * format of the response is incompatible, i.e. it is not a JSON object.
+     */
+    static boolean shouldAddShardFailureCheck(String path) {
+        return path.startsWith("_cat") == false && path.startsWith("_ml/datafeeds/") == false;
+    }
+
+    /**
+     * Converts Kibana's block quoted strings into standard JSON. These
+     * {@code """} delimited strings can be embedded in CONSOLE and can
+     * contain newlines and {@code "} without the normal JSON escaping.
+     * This method adds that escaping back in.
+     */
+    @PackageScope
+    static String replaceBlockQuote(String body) {
+        int start = body.indexOf("\"\"\"");
+        if (start < 0) {
+            return body;
+        }
+        /*
+         * 1.3 is a fairly wild guess of the extra space needed to hold
+         * the escaped string.
+         */
+        StringBuilder result = new StringBuilder((int) (body.length() * 1.3));
+        int startOfNormal = 0;
+        while (start >= 0) {
+            int end = body.indexOf("\"\"\"", start + 3);
+            if (end < 0) {
+                throw new InvalidUserDataException("Invalid block quote starting at " + start + " in:\n" + body);
+            }
+            result.append(body.substring(startOfNormal, start));
+            result.append('"');
+            result.append(body.substring(start + 3, end).replace("\"", "\\\"").replace("\n", "\\n"));
+            result.append('"');
+            startOfNormal = end + 3;
+            start = body.indexOf("\"\"\"", startOfNormal);
+        }
+        result.append(body.substring(startOfNormal));
+        return result.toString();
+    }
+
+    private class TestBuilder {
+        /**
+         * These languages aren't supported by the syntax highlighter so we
+         * shouldn't use them.
+         */
+        private static final List<String> BAD_LANGUAGES = List.of("json", "javascript");
+
+        String method = "(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)";
+        String pathAndQuery = "(?<pathAndQuery>[^\\n]+)";
+
+        String badBody = "GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#";
+        String body = "(?<body>(?:\\n(?!" + badBody + ")[^\\n]+)+)";
+
+        String rawRequest = "(?:" + method + "\\s+" + pathAndQuery + body + "?)";
+
+        String yamlRequest = "(?:startyaml(?s)(?<yaml>.+?)(?-s)endyaml)";
+        String nonComment = "(?:" + rawRequest + "|" + yamlRequest + ")";
+        String comment = "(?<comment>#.+)";
+
+        String SYNTAX = "(?:" + comment + "|" + nonComment + ")\\n+";
+
+        /**
+         * Files containing all snippets that *probably* should be converted
+         * to `// CONSOLE` but have yet to be converted. All files are paths
+         * relative to the docs dir.
+         */
+        private Set<String> unconvertedCandidates = new HashSet<>();
+
+        /**
+         * The last non-TESTRESPONSE snippet.
+         */
+        Snippet previousTest;
+
+        /**
+         * The file in which we saw the last snippet that made a test.
+         */
+        Path lastDocsPath;
+
+        /**
+         * The file we're building.
+         */
+        PrintWriter current;
+
+        Set<String> names = new HashSet<>();
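The SYNTAX grammar above is easiest to understand by running it. A standalone sketch, using the same named-group regexes on an invented console snippet (only the raw-request branch is shown; the yaml and comment branches work the same way):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class SyntaxSketch {
        public static void main(String[] args) {
            // Mirrors TestBuilder's grammar for a raw request.
            String method = "(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)";
            String pathAndQuery = "(?<pathAndQuery>[^\\n]+)";
            String badBody = "GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#";
            String body = "(?<body>(?:\\n(?!" + badBody + ")[^\\n]+)+)";
            Pattern syntax = Pattern.compile("(?:" + method + "\\s+" + pathAndQuery + body + "?)\\n+");

            Matcher m = syntax.matcher("GET /_search?size=1\n{ \"query\": { \"match_all\": {} } }\n");
            if (m.find()) {
                System.out.println(m.group("method"));       // GET
                System.out.println(m.group("pathAndQuery")); // /_search?size=1
                System.out.println(m.group("body"));         // the JSON body, including its leading newline
            }
        }
    }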
+        /**
+         * Called each time a snippet is encountered. Tracks the snippets and
+         * calls buildTest to actually build the test.
+         */
+        public void handleSnippet(Snippet snippet) {
+            if (snippet.isConsoleCandidate()) {
+                unconvertedCandidates.add(snippet.path.toString().replace('\\', '/'));
+            }
+            if (BAD_LANGUAGES.contains(snippet.language)) {
+                throw new InvalidUserDataException(snippet + ": Use `js` instead of `" + snippet.language + "`.");
+            }
+            if (snippet.testSetup) {
+                testSetup(snippet);
+                previousTest = snippet;
+                return;
+            }
+            if (snippet.testTearDown) {
+                testTearDown(snippet);
+                previousTest = snippet;
+                return;
+            }
+            if (snippet.testResponse || snippet.language.equals("console-result")) {
+                if (previousTest == null) {
+                    throw new InvalidUserDataException(snippet + ": No paired previous test");
+                }
+                if (previousTest.path.equals(snippet.path) == false) {
+                    throw new InvalidUserDataException(snippet + ": Result can't be first in file");
+                }
+                response(snippet);
+                return;
+            }
+            if (("js".equals(snippet.language)) && snippet.console != null && snippet.console) {
+                throw new InvalidUserDataException(snippet + ": Use `[source,console]` instead of `// CONSOLE`.");
+            }
+            if (snippet.test || snippet.language.equals("console")) {
+                test(snippet);
+                previousTest = snippet;
+                return;
+            }
+            // Must be an unmarked snippet....
+        }
+        private void test(Snippet test) {
+            setupCurrent(test);
+
+            if (test.continued) {
+                /* Catch some difficult to debug errors with // TEST[continued]
+                 * and throw a helpful error message. */
+                if (previousTest == null || previousTest.path.equals(test.path) == false) {
+                    throw new InvalidUserDataException("// TEST[continued] " + "cannot be on first snippet in a file: " + test);
+                }
+                if (previousTest != null && previousTest.testSetup) {
+                    throw new InvalidUserDataException("// TEST[continued] " + "cannot immediately follow // TESTSETUP: " + test);
+                }
+                if (previousTest != null && previousTest.testTearDown) {
+                    throw new InvalidUserDataException("// TEST[continued] " + "cannot immediately follow // TEARDOWN: " + test);
+                }
+            } else {
+                current.println("---");
+                if (test.name != null && test.name.isBlank() == false) {
+                    if (names.add(test.name) == false) {
+                        throw new InvalidUserDataException("Duplicated snippet name '" + test.name + "': " + test);
+                    }
+                    current.println("\"" + test.name + "\":");
+                } else {
+                    current.println("\"line_" + test.start + "\":");
+                }
+                /* The Elasticsearch test runner doesn't support quite a few
+                 * constructs unless we output this skip. We don't know if
+                 * we're going to use these constructs, but we might so we
+                 * output the skip just in case. */
+                current.println("  - skip:");
+                current.println("      features:");
+                current.println("        - default_shards");
+                current.println("        - stash_in_key");
+                current.println("        - stash_in_path");
+                current.println("        - stash_path_replace");
+                current.println("        - warnings");
+            }
+            if (test.skip != null) {
+                if (test.continued) {
+                    throw new InvalidUserDataException("Continued snippets " + "can't be skipped");
+                }
+                current.println("  - always_skip");
+                current.println("    reason: " + test.skip);
+            }
+            if (test.setup != null) {
+                setup(test);
+            }
+
+            body(test, false);
+
+            if (test.teardown != null) {
+                teardown(test);
+            }
+        }
+
+        private void response(Snippet response) {
+            if (null == response.skip) {
+                current.println("  - match:");
+                current.println("      $body:");
+                replaceBlockQuote(response.contents).lines().forEach(line -> current.println("        " + line));
+            }
+        }
+
+        private void teardown(final Snippet snippet) {
+            // insert a teardown defined outside of the docs
+            for (final String name : snippet.teardown.split(",")) {
+                final String teardown = teardowns.get(name);
+                if (teardown == null) {
+                    throw new InvalidUserDataException("Couldn't find named teardown " + name + " for " + snippet);
+                }
+                current.println("# Named teardown " + name);
+                current.println(teardown);
+            }
+        }
+
+        private void testTearDown(Snippet snippet) {
+            if (previousTest != null && previousTest.testSetup == false && snippet.path.equals(lastDocsPath)) {
+                throw new InvalidUserDataException(snippet + " must follow test setup or be first");
+            }
+            setupCurrent(snippet);
+            current.println("---");
+            current.println("teardown:");
+            body(snippet, true);
+        }
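emitDo, next, renders a single request as a raw `- do:` step. A rough standalone sketch of the output shape for an invented request; the indentation here is an assumption, and the authoritative expected output lives in RestTestsFromDocSnippetTaskSpec below:

    import java.io.PrintWriter;

    public class EmitDoSketch {
        public static void main(String[] args) {
            PrintWriter current = new PrintWriter(System.out, true);
            // Roughly what emitDo prints for "GET hockey/_search?size=1" with a one-line body.
            current.println("  - do:");
            current.println("      raw:");
            current.println("        method: GET");
            current.println("        path: \"hockey/_search\"");
            current.println("        size: \"1\"");
            current.println("        body: |");
            current.println("          { \"query\": { \"match_all\": {} } }");
            // Appended when the path qualifies for the shard failure check.
            current.println("  - is_false: _shards.failures");
        }
    }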
+        void emitDo(
+            String method,
+            String pathAndQuery,
+            String body,
+            String catchPart,
+            List<String> warnings,
+            boolean inSetup,
+            boolean skipShardFailures
+        ) {
+            String[] tokenized = pathAndQuery.split("\\?");
+            String path = tokenized[0];
+            String query = tokenized.length > 1 ? tokenized[1] : null;
+            if (path == null) {
+                path = ""; // Catch requests to the root...
+            } else {
+                path = path.replace("<", "%3C").replace(">", "%3E");
+            }
+            current.println("  - do:");
+            if (catchPart != null) {
+                current.println("      catch: " + catchPart);
+            }
+            if (false == warnings.isEmpty()) {
+                current.println("      warnings:");
+                for (String warning : warnings) {
+                    // Escape " because we're going to quote the warning
+                    String escaped = warning.replaceAll("\"", "\\\\\"");
+                    /* Quote the warning in case it starts with [ which makes
+                     * it look too much like an array. */
+                    current.println("        - \"" + escaped + "\"");
+                }
+            }
+            current.println("      raw:");
+            current.println("        method: " + method);
+            current.println("        path: \"" + path + "\"");
+            if (query != null) {
+                for (String param : query.split("&")) {
+                    String[] tokenizedQuery = param.split("=");
+                    String paramName = tokenizedQuery[0];
+                    String paramValue = tokenizedQuery.length > 1 ? tokenizedQuery[1] : null;
+                    if (paramValue == null) {
+                        paramValue = "";
+                    }
+                    current.println("        " + paramName + ": \"" + paramValue + "\"");
+                }
+            }
+            if (body != null) {
+                // Throw out the leading newline we get from parsing the body
+                body = body.substring(1);
+                // Replace """ quoted strings with valid json ones
+                body = replaceBlockQuote(body);
+                current.println("        body: |");
+                body.lines().forEach(line -> current.println("          " + line));
+            }
+            /* Catch any shard failures. These only cause a non-200 response if
+             * no shard succeeds. But we need to fail the tests on all of these
+             * because they mean invalid syntax or broken queries or something
+             * else that we don't want to teach people to do. The REST test
+             * framework doesn't allow us to have assertions in the setup
+             * section so we have to skip it there. We also omit the assertion
+             * from APIs that don't return a JSON object.
+             */
+            if (false == inSetup && skipShardFailures == false && shouldAddShardFailureCheck(path)) {
+                current.println("  - is_false: _shards.failures");
+            }
+        }
+
+        private void body(Snippet snippet, boolean inSetup) {
+            ParsingUtils.parse(snippet.getLocation(), snippet.contents, SYNTAX, (matcher, last) -> {
+                if (matcher.group("comment") != null) {
+                    // Comment
+                    return;
+                }
+                String yamlRequest = matcher.group("yaml");
+                if (yamlRequest != null) {
+                    current.println(yamlRequest);
+                    return;
+                }
+                String method = matcher.group("method");
+                String pathAndQuery = matcher.group("pathAndQuery");
+                String body = matcher.group("body");
+                String catchPart = last ? snippet.catchPart : null;
+                if (pathAndQuery.startsWith("/")) {
+                    // Leading '/'s break the generated paths
+                    pathAndQuery = pathAndQuery.substring(1);
+                }
+                emitDo(method, pathAndQuery, body, catchPart, snippet.warnings, inSetup, snippet.skipShardsFailures);
+            });
+        }
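setupCurrent, below, opens one generated YAML file per source doc by shifting the doc's relative path under the output root and swapping the extension. A standalone sketch of that mapping, with invented paths:

    import java.nio.file.Path;

    public class DestMappingSketch {
        public static void main(String[] args) {
            Path outputRoot = Path.of("build", "rest-tests", "rest-api-spec", "test");
            Path docPath = Path.of("painless-debugging.asciidoc"); // relative to the docs dir
            Path dest = outputRoot.resolve(docPath);
            // Replace the extension, keeping the directory structure.
            String fileName = dest.getName(dest.getNameCount() - 1).toString();
            dest = dest.getParent().resolve(fileName.replace(".asciidoc", ".yml"));
            // Prints roughly: build/rest-tests/rest-api-spec/test/painless-debugging.yml
            System.out.println(dest);
        }
    }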
+        private PrintWriter setupCurrent(Snippet test) {
+            if (test.path.equals(lastDocsPath)) {
+                return current;
+            }
+            names.clear();
+            finishLastTest();
+            lastDocsPath = test.path;
+
+            // Make the destination file:
+            // Shift the path into the destination directory tree
+            Path dest = getOutputRoot().toPath().resolve(test.path);
+            // Replace the extension
+            String fileName = dest.getName(dest.getNameCount() - 1).toString();
+            dest = dest.getParent().resolve(fileName.replace(".asciidoc", ".yml"));
+
+            // Now setup the writer
+            try {
+                Files.createDirectories(dest.getParent());
+                current = new PrintWriter(dest.toFile(), "UTF-8");
+                return current;
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        private void testSetup(Snippet snippet) {
+            if (snippet.path.equals(lastDocsPath)) {
+                throw new InvalidUserDataException(
+                    snippet + ": wasn't first. TESTSETUP can only be used in the first snippet of a document."
+                );
+            }
+            setupCurrent(snippet);
+            current.println("---");
+            current.println("setup:");
+            if (snippet.setup != null) {
+                setup(snippet);
+            }
+            body(snippet, true);
+        }
+
+        private void setup(final Snippet snippet) {
+            // insert a setup defined outside of the docs
+            for (final String name : snippet.setup.split(",")) {
+                final String setup = setups.get(name);
+                if (setup == null) {
+                    throw new InvalidUserDataException("Couldn't find named setup " + name + " for " + snippet);
+                }
+                current.println("# Named setup " + name);
+                current.println(setup);
+            }
+        }
+
+        public void checkUnconverted() {
+            List<String> listedButNotFound = new ArrayList<>();
+            for (String listed : expectedUnconvertedCandidates) {
+                if (false == unconvertedCandidates.remove(listed)) {
+                    listedButNotFound.add(listed);
+                }
+            }
+            String message = "";
+            if (false == listedButNotFound.isEmpty()) {
+                Collections.sort(listedButNotFound);
+                listedButNotFound = listedButNotFound.stream().map(notfound -> "  " + notfound).collect(Collectors.toList());
+                message += "Expected unconverted snippets but none found in:\n";
+                message += listedButNotFound.stream().collect(Collectors.joining("\n"));
+            }
+            if (false == unconvertedCandidates.isEmpty()) {
+                List<String> foundButNotListed = new ArrayList<>(unconvertedCandidates);
+                Collections.sort(foundButNotListed);
+                foundButNotListed = foundButNotListed.stream().map(f -> "  " + f).collect(Collectors.toList());
+                if (false == "".equals(message)) {
+                    message += "\n";
+                }
+                message += "Unexpected unconverted snippets:\n";
+                message += foundButNotListed.stream().collect(Collectors.joining("\n"));
+            }
+            if (false == "".equals(message)) {
+                throw new InvalidUserDataException(message);
+            }
+        }
+
+        public void finishLastTest() {
+            if (current != null) {
+                current.close();
+                current = null;
+            }
+        }
+    }
+
+}
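Snippet, the next file, models one parsed block. A small same-package sketch of constructing one and what it reports; the path, line number, and name are invented, and same-package access is assumed because most fields are package-private:

    package org.elasticsearch.gradle.internal.doc;

    import java.nio.file.Path;

    public class SnippetSketch {
        public static void main(String[] args) {
            Snippet snippet = new Snippet(Path.of("painless-debugging.asciidoc"), 22, "my-snippet");
            snippet.language = "console";
            snippet.contents = "GET /_search\n";
            snippet.validate(); // passes: a language is set, and no curl/JSON checks apply
            // Prints "painless-debugging.asciidoc[22:-1]" until the end line is set.
            System.out.println(snippet.getLocation());
        }
    }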
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Snippet.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Snippet.java
new file mode 100644
index 0000000000000..b8aa864734f44
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Snippet.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
+
+import org.gradle.api.InvalidUserDataException;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+public class Snippet {
+    static final int NOT_FINISHED = -1;
+
+    /**
+     * Path to the file containing this snippet. Relative to docs.dir of the
+     * DocSnippetTask that created it.
+     */
+    Path path;
+    int start;
+    int end = NOT_FINISHED;
+    public String contents;
+
+    Boolean console = null;
+    boolean test = false;
+    boolean testResponse = false;
+    boolean testSetup = false;
+    boolean testTearDown = false;
+    String skip = null;
+    boolean continued = false;
+    String language = null;
+    String catchPart = null;
+    String setup = null;
+    String teardown = null;
+    boolean curl;
+    List<String> warnings = new ArrayList<>();
+    boolean skipShardsFailures = false;
+    String name;
+
+    public Snippet(Path path, int start, String name) {
+        this.path = path;
+        this.start = start;
+        this.name = name;
+    }
+
+    public void validate() {
+        if (language == null) {
+            throw new InvalidUserDataException(
+                name
+                    + ": "
+                    + "Snippet missing a language. This is required by "
+                    + "Elasticsearch's doc testing infrastructure so we "
+                    + "can be sure we don't accidentally forget to test a "
+                    + "snippet."
+            );
+        }
+        assertValidCurlInput();
+        assertValidJsonInput();
+    }
+
+    String getLocation() {
+        return path + "[" + start + ":" + end + "]";
+    }
+
+    private void assertValidCurlInput() {
+        // Try to detect snippets that contain `curl`
+        if ("sh".equals(language) || "shell".equals(language)) {
+            curl = contents.contains("curl");
+            if (console == Boolean.FALSE && curl == false) {
+                throw new InvalidUserDataException(name + ": " + "No need for NOTCONSOLE if snippet doesn't " + "contain `curl`.");
+            }
+        }
+    }
+
+    private void assertValidJsonInput() {
+        if (testResponse && ("js".equals(language) || "console-result".equals(language)) && null == skip) {
+            String quoted = contents
+                // quote values starting with $
+                .replaceAll("([:,])\\s*(\\$[^ ,\\n}]+)", "$1 \"$2\"")
+                // quote fields starting with $
+                .replaceAll("(\\$[^ ,\\n}]+)\\s*:", "\"$1\":");
+
+            JsonFactory jf = new JsonFactory();
+            jf.configure(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER, true);
+            JsonParser jsonParser;
+
+            try {
+                jsonParser = jf.createParser(quoted);
+                while (jsonParser.isClosed() == false) {
+                    jsonParser.nextToken();
+                }
+            } catch (JsonParseException e) {
+                throw new InvalidUserDataException(
+                    "Invalid json in "
+                        + name
+                        + ". The error is:\n"
+                        + e.getMessage()
+                        + ".\n"
+                        + "After substitutions and munging, the json looks like:\n"
+                        + quoted,
+                    e
+                );
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    @Override
+    public String toString() {
+        String result = path + "[" + start + ":" + end + "]";
+        if (language != null) {
+            result += "(" + language + ")";
+        }
+        if (console != null) {
+            result += console ? "// CONSOLE" : "// NOTCONSOLE";
+        }
+        if (test) {
+            result += "// TEST";
+            if (catchPart != null) {
+                result += "[catch: " + catchPart + "]";
+            }
+            if (skip != null) {
+                result += "[skip=" + skip + "]";
+            }
+            if (continued) {
+                result += "[continued]";
+            }
+            if (setup != null) {
+                result += "[setup:" + setup + "]";
+            }
+            if (teardown != null) {
+                result += "[teardown:" + teardown + "]";
+            }
+            for (String warning : warnings) {
+                result += "[warning:" + warning + "]";
+            }
+            if (skipShardsFailures) {
+                result += "[skip_shard_failures]";
+            }
+        }
+        if (testResponse) {
+            result += "// TESTRESPONSE";
+            if (skip != null) {
+                result += "[skip=" + skip + "]";
+            }
+        }
+        if (testSetup) {
+            result += "// TESTSETUP";
+        }
+        if (curl) {
+            result += "(curl)";
+        }
+        return result;
+    }
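assertValidJsonInput above munges TESTRESPONSE placeholders such as $body.error.script_stack into quoted strings before handing the payload to Jackson. A standalone sketch of that munging on an invented fragment, using the same two regexes:

    public class MungeSketch {
        public static void main(String[] args) {
            String contents = "{\n  $_path: 1,\n  \"script_stack\": $body.error.script_stack\n}";
            String quoted = contents
                // quote values starting with $
                .replaceAll("([:,])\\s*(\\$[^ ,\\n}]+)", "$1 \"$2\"")
                // quote fields starting with $
                .replaceAll("(\\$[^ ,\\n}]+)\\s*:", "\"$1\":");
            // Prints valid JSON:
            // {
            //   "$_path": 1,
            //   "script_stack": "$body.error.script_stack"
            // }
            System.out.println(quoted);
        }
    }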
+    /**
+     * Is this snippet a candidate for conversion to `// CONSOLE`?
+     */
+    boolean isConsoleCandidate() {
+        /* Snippets that are responses or already marked as `// CONSOLE` or
+         * `// NOTCONSOLE` are not candidates. */
+        if (console != null || testResponse) {
+            return false;
+        }
+        /* js snippets almost always should be marked with `// CONSOLE`. js
+         * snippets that shouldn't be marked `// CONSOLE`, like examples for
+         * js client, should always be marked with `// NOTCONSOLE`.
+         *
+         * `sh` snippets that contain `curl` almost always should be marked
+         * with `// CONSOLE`. In the exceptionally rare cases where they are
+         * not communicating with Elasticsearch, like the examples in the ec2
+         * and gce discovery plugins, the snippets should be marked
+         * `// NOTCONSOLE`. */
+        return language.equals("js") || curl;
+    }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/SnippetParser.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/SnippetParser.java
new file mode 100644
index 0000000000000..064c1c460febf
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/SnippetParser.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+import java.io.File;
+import java.util.List;
+import java.util.Map;
+
+public interface SnippetParser {
+    List<Snippet> parseDoc(File rootDir, File docFile, List<Map.Entry<String, String>> substitutions);
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Source.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Source.java
new file mode 100644
index 0000000000000..b7f2f01aa7987
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/doc/Source.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.doc;
+
+public final class Source {
+    boolean matches;
+    String language;
+    String name;
+
+    public Source(boolean matches, String language, String name) {
+        this.matches = matches;
+        this.language = language;
+        this.name = name;
+    }
+}
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/AsciidocParserSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/AsciidocParserSpec.groovy
new file mode 100644
index 0000000000000..b7ac363ef7ad3
--- /dev/null
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/AsciidocParserSpec.groovy
@@ -0,0 +1,184 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.gradle.internal.doc; + +import spock.lang.Specification +import spock.lang.Unroll + +import org.gradle.api.InvalidUserDataException + +import static org.elasticsearch.gradle.internal.doc.AsciidocSnippetParser.finalizeSnippet; +import static org.elasticsearch.gradle.internal.doc.AsciidocSnippetParser.matchSource; + +class AsciidocParserSpec extends Specification { + + def testMatchSource() { + expect: + with(matchSource("[source,console]")) { + matches == true + language == "console" + name == null + } + + with(matchSource("[source,console,id=snippet-name-1]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[source, console, id=snippet-name-1]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[source, console, id=snippet-name-1]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[source,console,attr=5,id=snippet-name-1,attr2=6]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[source,console, attr=5, id=snippet-name-1, attr2=6]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[\"source\",\"console\",id=\"snippet-name-1\"]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + + with(matchSource("[source,console,id=\"snippet-name-1\"]")) { + matches == true + language == "console" + name == "snippet-name-1" + } + with(matchSource("[source.merge.styled,esql]")) { + matches == true + language == "esql" + } + + with(matchSource("[source.merge.styled,foo-bar]")) { + matches == true + language == "foo-bar" + } + } + + @Unroll + def "checks for valid json for #languageParam"() { + given: + def snippet = snippet() { + language = languageParam + testResponse = true + } + def json = """{ + "name": "John Doe", + "age": 30, + "isMarried": true, + "address": { + "street": "123 Main Street", + "city": "Springfield", + "state": "IL", + "zip": "62701" + }, + "hobbies": ["Reading", "Cooking", "Traveling"] +}""" + when: + def result = finalizeSnippet(snippet, json, [:], [:].entrySet()) + then: + result != null + + when: + finalizeSnippet(snippet, "some no valid json", [:], [:].entrySet()) + then: + def e = thrown(InvalidUserDataException) + e.message.contains("Invalid json in") + + when: + snippet.skip = "true" + result = finalizeSnippet(snippet, "some no valid json", [:], [:].entrySet()) + then: + result != null + + where: + languageParam << ["js", "console-result"] + } + + def "test finalized snippet handles substitutions"() { + given: + def snippet = snippet() { + language = "console" + } + when: + finalizeSnippet(snippet, "snippet-content substDefault subst", [substDefault: "\$body"], [subst: 'substValue'].entrySet()) + then: + snippet.contents == "snippet-content \$body substValue" + } + + def snippetMustHaveLanguage() { + given: + def snippet = snippet() + when: + finalizeSnippet(snippet, "snippet-content", [:], []) + then: + def e = thrown(InvalidUserDataException) + e.message.contains("Snippet missing a language.") + } + + def testEmit() { + given: + def snippet = snippet() { + language = "console" + } + when: + finalizeSnippet(snippet, "snippet-content", [:], []) + then: + snippet.contents == "snippet-content" + } + + def testSnippetsWithCurl() { + given: + def snippet = snippet() { + language = "sh" + name = "snippet-name-1" + } + when: + finalizeSnippet(snippet, "curl substDefault subst", [:], 
[:].entrySet()) + then: + snippet.curl == true + } + + def "test snippets with no curl no console"() { + given: + def snippet = snippet() { + console = false + language = "shell" + } + when: + finalizeSnippet(snippet, "hello substDefault subst", [:], [:].entrySet()) + then: + def e = thrown(InvalidUserDataException) + e.message.contains("No need for NOTCONSOLE if snippet doesn't contain `curl`") + } + + Snippet snippet(Closure configClosure = {}) { + def snippet = new Snippet(new File("SomePath").toPath(), 0, "snippet-name-1") + configClosure.delegate = snippet + configClosure() + return snippet + } +} diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/DocSnippetTaskSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/DocSnippetTaskSpec.groovy new file mode 100644 index 0000000000000..89939645d0f9c --- /dev/null +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/DocSnippetTaskSpec.groovy @@ -0,0 +1,676 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.doc + +import spock.lang.Specification +import spock.lang.TempDir + +import org.gradle.api.InvalidUserDataException +import org.gradle.testfixtures.ProjectBuilder + +class DocSnippetTaskSpec extends Specification { + + @TempDir + File tempDir + + def "handling test parsing multiple snippets per file"() { + given: + def project = ProjectBuilder.builder().build() + def task = project.tasks.register("docSnippetTask", DocSnippetTask).get() + when: + def substitutions = [] + def snippets = task.parseDocFile( + tempDir, docFile( + """ +[[mapper-annotated-text]] +=== Mapper annotated text plugin + +experimental[] + +The mapper-annotated-text plugin provides the ability to index text that is a +combination of free-text and special markup that is typically used to identify +items of interest such as people or organisations (see NER or Named Entity Recognition +tools). + + +The elasticsearch markup allows one or more additional tokens to be injected, unchanged, into the token +stream at the same position as the underlying text it annotates. + +:plugin_name: mapper-annotated-text +include::install_remove.asciidoc[] + +[[mapper-annotated-text-usage]] +==== Using the `annotated-text` field + +The `annotated-text` tokenizes text content as per the more common {ref}/text.html[`text`] field (see +"limitations" below) but also injects any marked-up annotation tokens directly into +the search index: + +[source,console] +-------------------------- +PUT my-index-000001 +{ + "mappings": { + "properties": { + "my_field": { + "type": "annotated_text" + } + } + } +} +-------------------------- + +Such a mapping would allow marked-up text eg wikipedia articles to be indexed as both text +and structured tokens. The annotations use a markdown-like syntax using URL encoding of +one or more values separated by the `&` symbol. + + +We can use the "_analyze" api to test how an example annotation would be stored as tokens +in the search index: + + +[source,js] +-------------------------- +GET my-index-000001/_analyze +{ + "field": "my_field", + "text":"Investors in [Apple](Apple+Inc.) rejoiced." 
+} +-------------------------- +// NOTCONSOLE + +Response: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "investors", + "start_offset": 0, + "end_offset": 9, + "type": "", + "position": 0 + }, + { + "token": "in", + "start_offset": 10, + "end_offset": 12, + "type": "", + "position": 1 + }, + { + "token": "Apple Inc.", <1> + "start_offset": 13, + "end_offset": 18, + "type": "annotation", + "position": 2 + }, + { + "token": "apple", + "start_offset": 13, + "end_offset": 18, + "type": "", + "position": 2 + }, + { + "token": "rejoiced", + "start_offset": 19, + "end_offset": 27, + "type": "", + "position": 3 + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +<1> Note the whole annotation token `Apple Inc.` is placed, unchanged as a single token in +the token stream and at the same position (position 2) as the text token (`apple`) it annotates. + + +We can now perform searches for annotations using regular `term` queries that don't tokenize +the provided search values. Annotations are a more precise way of matching as can be seen +in this example where a search for `Beck` will not match `Jeff Beck` : + +[source,console] +-------------------------- +# Example documents +PUT my-index-000001/_doc/1 +{ + "my_field": "[Beck](Beck) announced a new tour"<1> +} + +PUT my-index-000001/_doc/2 +{ + "my_field": "[Jeff Beck](Jeff+Beck&Guitarist) plays a strat"<2> +} + +# Example search +GET my-index-000001/_search +{ + "query": { + "term": { + "my_field": "Beck" <3> + } + } +} +-------------------------- + +<1> As well as tokenising the plain text into single words e.g. `beck`, here we +inject the single token value `Beck` at the same position as `beck` in the token stream. +<2> Note annotations can inject multiple tokens at the same position - here we inject both +the very specific value `Jeff Beck` and the broader term `Guitarist`. This enables +broader positional queries e.g. finding mentions of a `Guitarist` near to `strat`. +<3> A benefit of searching with these carefully defined annotation tokens is that a query for +`Beck` will not match document 2 that contains the tokens `jeff`, `beck` and `Jeff Beck` + +WARNING: Any use of `=` signs in annotation values eg `[Prince](person=Prince)` will +cause the document to be rejected with a parse failure. In future we hope to have a use for +the equals signs so wil actively reject documents that contain this today. + + +[[mapper-annotated-text-tips]] +==== Data modelling tips +===== Use structured and unstructured fields + +Annotations are normally a way of weaving structured information into unstructured text for +higher-precision search. + +`Entity resolution` is a form of document enrichment undertaken by specialist software or people +where references to entities in a document are disambiguated by attaching a canonical ID. +The ID is used to resolve any number of aliases or distinguish between people with the +same name. The hyperlinks connecting Wikipedia's articles are a good example of resolved +entity IDs woven into text. 
+ +These IDs can be embedded as annotations in an annotated_text field but it often makes +sense to include them in dedicated structured fields to support discovery via aggregations: + +[source,console] +-------------------------- +PUT my-index-000001 +{ + "mappings": { + "properties": { + "my_unstructured_text_field": { + "type": "annotated_text" + }, + "my_structured_people_field": { + "type": "text", + "fields": { + "keyword" : { + "type": "keyword" + } + } + } + } + } +} +-------------------------- + +Applications would then typically provide content and discover it as follows: + +[source,console] +-------------------------- +# Example documents +PUT my-index-000001/_doc/1 +{ + "my_unstructured_text_field": "[Shay](%40kimchy) created elasticsearch", + "my_twitter_handles": ["@kimchy"] <1> +} + +GET my-index-000001/_search +{ + "query": { + "query_string": { + "query": "elasticsearch OR logstash OR kibana",<2> + "default_field": "my_unstructured_text_field" + } + }, + "aggregations": { + \t"top_people" :{ + \t "significant_terms" : { <3> +\t "field" : "my_twitter_handles.keyword" + \t } + \t} + } +} +-------------------------- + +<1> Note the `my_twitter_handles` contains a list of the annotation values +also used in the unstructured text. (Note the annotated_text syntax requires escaping). +By repeating the annotation values in a structured field this application has ensured that +the tokens discovered in the structured field can be used for search and highlighting +in the unstructured field. +<2> In this example we search for documents that talk about components of the elastic stack +<3> We use the `my_twitter_handles` field here to discover people who are significantly +associated with the elastic stack. + +===== Avoiding over-matching annotations +By design, the regular text tokens and the annotation tokens co-exist in the same indexed +field but in rare cases this can lead to some over-matching. + +The value of an annotation often denotes a _named entity_ (a person, place or company). +The tokens for these named entities are inserted untokenized, and differ from typical text +tokens because they are normally: + +* Mixed case e.g. `Madonna` +* Multiple words e.g. `Jeff Beck` +* Can have punctuation or numbers e.g. `Apple Inc.` or `@kimchy` + +This means, for the most part, a search for a named entity in the annotated text field will +not have any false positives e.g. when selecting `Apple Inc.` from an aggregation result +you can drill down to highlight uses in the text without "over matching" on any text tokens +like the word `apple` in this context: + + the apple was very juicy + +However, a problem arises if your named entity happens to be a single term and lower-case e.g. the +company `elastic`. In this case, a search on the annotated text field for the token `elastic` +may match a text document such as this: + + they fired an elastic band + +To avoid such false matches users should consider prefixing annotation values to ensure +they don't name clash with text tokens e.g. 
+ + [elastic](Company_elastic) released version 7.0 of the elastic stack today + + + + +[[mapper-annotated-text-highlighter]] +==== Using the `annotated` highlighter + +The `annotated-text` plugin includes a custom highlighter designed to mark up search hits +in a way which is respectful of the original markup: + +[source,console] +-------------------------- +# Example documents +PUT my-index-000001/_doc/1 +{ + "my_field": "The cat sat on the [mat](sku3578)" +} + +GET my-index-000001/_search +{ + "query": { + "query_string": { + "query": "cats" + } + }, + "highlight": { + "fields": { + "my_field": { + "type": "annotated", <1> + "require_field_match": false + } + } + } +} +-------------------------- + +<1> The `annotated` highlighter type is designed for use with annotated_text fields + +The annotated highlighter is based on the `unified` highlighter and supports the same +settings but does not use the `pre_tags` or `post_tags` parameters. Rather than using +html-like markup such as `cat` the annotated highlighter uses the same +markdown-like syntax used for annotations and injects a key=value annotation where `_hit_term` +is the key and the matched search term is the value e.g. + + The [cat](_hit_term=cat) sat on the [mat](sku3578) + +The annotated highlighter tries to be respectful of any existing markup in the original +text: + +* If the search term matches exactly the location of an existing annotation then the +`_hit_term` key is merged into the url-like syntax used in the `(...)` part of the +existing annotation. +* However, if the search term overlaps the span of an existing annotation it would break +the markup formatting so the original annotation is removed in favour of a new annotation +with just the search hit information in the results. 
+* Any non-overlapping annotations in the original text are preserved in highlighter +selections + + +[[mapper-annotated-text-limitations]] +==== Limitations + +The annotated_text field type supports the same mapping settings as the `text` field type +but with the following exceptions: + +* No support for `fielddata` or `fielddata_frequency_filter` +* No support for `index_prefixes` or `index_phrases` indexing + +""" + ), substitutions + ) + then: + snippets*.test == [false, false, false, false, false, false, false] + snippets*.catchPart == [null, null, null, null, null, null, null] + } + + def "handling test parsing"() { + when: + def substitutions = [] + def snippets = task().parseDocFile( + tempDir, docFile( + """ +[source,console] +---- +POST logs-my_app-default/_rollover/ +---- +// TEST[s/_explain\\/1/_explain\\/1?error_trace=false/ catch:/painless_explain_error/] +""" + ), substitutions + ) + then: + snippets*.test == [true] + snippets*.catchPart == ["/painless_explain_error/"] + substitutions.size() == 1 + substitutions[0].key == "_explain\\/1" + substitutions[0].value == "_explain\\/1?error_trace=false" + + when: + substitutions = [] + snippets = task().parseDocFile( + tempDir, docFile( + """ + +[source,console] +---- +PUT _snapshot/my_hdfs_repository +{ + "type": "hdfs", + "settings": { + "uri": "hdfs://namenode:8020/", + "path": "elasticsearch/repositories/my_hdfs_repository", + "conf.dfs.client.read.shortcircuit": "true" + } +} +---- +// TEST[skip:we don't have hdfs set up while testing this] +""" + ), substitutions + ) + then: + snippets*.test == [true] + snippets*.skip == ["we don't have hdfs set up while testing this"] + } + + def "handling testresponse parsing"() { + when: + def substitutions = [] + def snippets = task().parseDocFile( + tempDir, docFile( + """ +[source,console] +---- +POST logs-my_app-default/_rollover/ +---- +// TESTRESPONSE[s/\\.\\.\\./"script_stack": \$body.error.caused_by.script_stack, "script": \$body.error.caused_by.script, "lang": \$body.error.caused_by.lang, "position": \$body.error.caused_by.position, "caused_by": \$body.error.caused_by.caused_by, "reason": \$body.error.caused_by.reason/] +""" + ), substitutions + ) + then: + snippets*.test == [false] + snippets*.testResponse == [true] + substitutions.size() == 1 + substitutions[0].key == "\\.\\.\\." 
+ substitutions[0].value == + "\"script_stack\": \$body.error.caused_by.script_stack, \"script\": \$body.error.caused_by.script, \"lang\": \$body.error.caused_by.lang, \"position\": \$body.error.caused_by.position, \"caused_by\": \$body.error.caused_by.caused_by, \"reason\": \$body.error.caused_by.reason" + + when: + snippets = task().parseDocFile( + tempDir, docFile( + """ +[source,console] +---- +POST logs-my_app-default/_rollover/ +---- +// TESTRESPONSE[skip:no setup made for this example yet] +""" + ), [] + ) + then: + snippets*.test == [false] + snippets*.testResponse == [true] + snippets*.skip == ["no setup made for this example yet"] + + when: + substitutions = [] + snippets = task().parseDocFile( + tempDir, docFile( + """ +[source,txt] +--------------------------------------------------------------------------- +my-index-000001 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> -> 192.168.56.30 bGG90GE +--------------------------------------------------------------------------- +// TESTRESPONSE[non_json] +""" + ), substitutions + ) + then: + snippets*.test == [false] + snippets*.testResponse == [true] + substitutions.size() == 4 + } + + + def "handling console parsing"() { + when: + def snippets = task().parseDocFile( + tempDir, docFile( + """ +[source,console] +---- + +// $firstToken +---- +""" + ), [] + ) + then: + snippets*.console == [firstToken.equals("CONSOLE")] + + + when: + task().parseDocFile( + tempDir, docFile( + """ +[source,console] +---- +// $firstToken +// $secondToken +---- +""" + ), [] + ) + then: + def e = thrown(InvalidUserDataException) + e.message == "mapping-charfilter.asciidoc:4: Can't be both CONSOLE and NOTCONSOLE" + + when: + task().parseDocFile( + tempDir, docFile( + """ +// $firstToken +// $secondToken +""" + ), [] + ) + then: + e = thrown(InvalidUserDataException) + e.message == "mapping-charfilter.asciidoc:1: $firstToken not paired with a snippet" + + where: + firstToken << ["CONSOLE", "NOTCONSOLE"] + secondToken << ["NOTCONSOLE", "CONSOLE"] + } + + def "test parsing snippet from doc"() { + def doc = docFile( + """ +[source,console] +---- +GET /_analyze +{ + "tokenizer": "keyword", + "char_filter": [ + { + "type": "mapping", + "mappings": [ + "٠ => 0", + "١ => 1", + "٢ => 2" + ] + } + ], + "text": "My license plate is ٢٥٠١٥" +} +---- +""" + ) + def snippets = task().parseDocFile(tempDir, doc, []) + expect: + snippets*.start == [3] + snippets*.language == ["console"] + snippets*.contents == ["""GET /_analyze +{ + "tokenizer": "keyword", + "char_filter": [ + { + "type": "mapping", + "mappings": [ + "٠ => 0", + "١ => 1", + "٢ => 2" + ] + } + ], + "text": "My license plate is ٢٥٠١٥" +} +"""] + } + + def "test parsing snippet from doc2"() { + given: + def doc = docFile( + """ +[role="xpack"] +[[ml-update-snapshot]] += Update model snapshots API +++++ +Update model snapshots +++++ + +Updates certain properties of a snapshot. + +[[ml-update-snapshot-request]] +== {api-request-title} + +`POST _ml/anomaly_detectors//model_snapshots//_update` + +[[ml-update-snapshot-prereqs]] +== {api-prereq-title} + +Requires the `manage_ml` cluster privilege. This privilege is included in the +`machine_learning_admin` built-in role. 
+ +[[ml-update-snapshot-path-parms]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=snapshot-id] + +[[ml-update-snapshot-request-body]] +== {api-request-body-title} + +The following properties can be updated after the model snapshot is created: + +`description`:: +(Optional, string) A description of the model snapshot. + +`retain`:: +(Optional, Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=retain] + + +[[ml-update-snapshot-example]] +== {api-examples-title} + +[source,console] +-------------------------------------------------- +POST +_ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update +{ + "description": "Snapshot 1", + "retain": true +} +-------------------------------------------------- +// TEST[skip:todo] + +When the snapshot is updated, you receive the following results: +[source,js] +---- +{ + "acknowledged": true, + "model": { + "job_id": "it_ops_new_logs", + "timestamp": 1491852978000, + "description": "Snapshot 1", +... + "retain": true + } +} +---- +""" + ) + def snippets = task().parseDocFile(tempDir, doc, []) + expect: + snippets*.start == [50, 62] + snippets*.language == ["console", "js"] + snippets*.contents == ["""POST +_ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update +{ + "description": "Snapshot 1", + "retain": true +} +""", """{ + "acknowledged": true, + "model": { + "job_id": "it_ops_new_logs", + "timestamp": 1491852978000, + "description": "Snapshot 1", +... + "retain": true + } +} +"""] + } + + + File docFile(String docContent) { + def file = tempDir.toPath().resolve("mapping-charfilter.asciidoc").toFile() + file.text = docContent + return file + } + + + private DocSnippetTask task() { + ProjectBuilder.builder().build().tasks.register("docSnippetTask", DocSnippetTask).get() + } + +} diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTaskSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTaskSpec.groovy new file mode 100644 index 0000000000000..6ef4726e1578a --- /dev/null +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromDocSnippetTaskSpec.groovy @@ -0,0 +1,839 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.doc + +import spock.lang.Specification +import spock.lang.TempDir + +import org.gradle.api.InvalidUserDataException +import org.gradle.testfixtures.ProjectBuilder + +import static org.elasticsearch.gradle.internal.doc.RestTestsFromDocSnippetTask.replaceBlockQuote +import static org.elasticsearch.gradle.internal.doc.RestTestsFromDocSnippetTask.shouldAddShardFailureCheck + +class RestTestsFromDocSnippetTaskSpec extends Specification { + + @TempDir + File tempDir; + + def "test simple block quote"() { + expect: + replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\"") == "\"foo\": \"bort baz\"" + } + + def "test multiple block quotes"() { + expect: + replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\", \"bar\": \"\"\"other\"\"\"") == "\"foo\": \"bort baz\", \"bar\": \"other\"" + } + + def "test escaping in block quote"() { + expect: + replaceBlockQuote("\"foo\": \"\"\"bort\" baz\"\"\"") == "\"foo\": \"bort\\\" baz\"" + replaceBlockQuote("\"foo\": \"\"\"bort\n baz\"\"\"") == "\"foo\": \"bort\\n baz\"" + } + + def "test invalid block quotes"() { + given: + String input = "\"foo\": \"\"\"bar\""; + when: + RestTestsFromDocSnippetTask.replaceBlockQuote(input); + then: + def e = thrown(InvalidUserDataException) + e.message == "Invalid block quote starting at 7 in:\n" + input + } + + def "test is doc write request"() { + expect: + shouldAddShardFailureCheck("doc-index/_search") == true + shouldAddShardFailureCheck("_cat") == false + shouldAddShardFailureCheck("_ml/datafeeds/datafeed-id/_preview") == false + } + + def "can create rest tests from docs"() { + def build = ProjectBuilder.builder().build() + given: + def task = build.tasks.create("restTestFromSnippet", RestTestsFromDocSnippetTask) +// def task = build.tasks.create("restTestFromSnippet", RestTestsFromSnippetsTask) + task.expectedUnconvertedCandidates = ["ml-update-snapshot.asciidoc", "reference/security/authorization/run-as-privilege.asciidoc"] +// + docs() + task.docs = build.fileTree(new File(tempDir, "docs")) + task.testRoot.convention(build.getLayout().buildDirectory.dir("rest-tests")); + + when: + task.getActions().forEach { it.execute(task) } + def restSpec = new File(task.getTestRoot().get().getAsFile(), "rest-api-spec/test/painless-debugging.yml") + + then: + restSpec.exists() + restSpec.text == """--- +"line_22": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: PUT + path: "hockey/_doc/1" + refresh: "" + body: | + {"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} + - is_false: _shards.failures + - do: + catch: /painless_explain_error/ + raw: + method: POST + path: "hockey/_explain/1" + error_trace: "false" + body: | + { + "query": { + "script": { + "script": "Debug.explain(doc.goals)" + } + } + } + - is_false: _shards.failures + - match: + \$body: + { + "error": { + "type": "script_exception", + "to_string": "[1, 9, 27]", + "painless_class": "org.elasticsearch.index.fielddata.ScriptDocValues.Longs", + "java_class": "org.elasticsearch.index.fielddata.ScriptDocValues\$Longs", + "script_stack": \$body.error.script_stack, "script": \$body.error.script, "lang": \$body.error.lang, "position": \$body.error.position, "caused_by": \$body.error.caused_by, "root_cause": \$body.error.root_cause, "reason": \$body.error.reason + }, + "status": 400 + } + - do: + catch: /painless_explain_error/ + raw: + method: POST + path: "hockey/_update/1" + error_trace: "false" + body: | + { + 
"script": "Debug.explain(ctx._source)" + } + - is_false: _shards.failures + - match: + \$body: + { + "error" : { + "root_cause": \$body.error.root_cause, + "type": "illegal_argument_exception", + "reason": "failed to execute script", + "caused_by": { + "type": "script_exception", + "to_string": \$body.error.caused_by.to_string, + "painless_class": "java.util.LinkedHashMap", + "java_class": "java.util.LinkedHashMap", + "script_stack": \$body.error.caused_by.script_stack, "script": \$body.error.caused_by.script, "lang": \$body.error.caused_by.lang, "position": \$body.error.caused_by.position, "caused_by": \$body.error.caused_by.caused_by, "reason": \$body.error.caused_by.reason + } + }, + "status": 400 + } +""" + def restSpec2 = new File(task.testRoot.get().getAsFile(), "rest-api-spec/test/ml-update-snapshot.yml") + restSpec2.exists() + restSpec2.text == """--- +"line_50": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - always_skip + reason: todo + - do: + raw: + method: POST + path: "_ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update" + body: | + { + "description": "Snapshot 1", + "retain": true + } + - is_false: _shards.failures +""" + def restSpec3 = new File(task.testRoot.get().getAsFile(), "rest-api-spec/test/reference/sql/getting-started.yml") + restSpec3.exists() + restSpec3.text == """--- +"line_10": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: PUT + path: "library/_bulk" + refresh: "" + body: | + {"index":{"_id": "Leviathan Wakes"}} + {"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561} + {"index":{"_id": "Hyperion"}} + {"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482} + {"index":{"_id": "Dune"}} + {"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} + - is_false: _shards.failures + - do: + raw: + method: POST + path: "_sql" + format: "txt" + body: | + { + "query": "SELECT * FROM library WHERE release_date < '2000-01-01'" + } + - is_false: _shards.failures + - match: + \$body: + / \\s+author \\s+\\| \\s+name \\s+\\| \\s+page_count \\s+\\| \\s+release_date\\s* + ---------------\\+---------------\\+---------------\\+------------------------\\s* + Dan \\s+Simmons \\s+\\|Hyperion \\s+\\|482 \\s+\\|1989-05-26T00:00:00.000Z\\s* + Frank \\s+Herbert \\s+\\|Dune \\s+\\|604 \\s+\\|1965-06-01T00:00:00.000Z\\s*/ +""" + + def restSpec4 = new File(task.testRoot.get().getAsFile(), "rest-api-spec/test/reference/security/authorization/run-as-privilege.yml") + restSpec4.exists() + restSpec4.text == """--- +"line_51": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: POST + path: "_security/role/my_director" + refresh: "true" + body: | + { + "cluster": ["manage"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": [ "manage" ] + } + ], + "run_as": [ "jacknich", "rdeniro" ], + "metadata" : { + "version" : 1 + } + } + - is_false: _shards.failures +--- +"line_114": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: POST + path: "_security/role/my_admin_role" + refresh: "true" + body: | + { + "cluster": ["manage"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": [ "manage" ] + } + 
], + "applications": [ + { + "application": "myapp", + "privileges": [ "admin", "read" ], + "resources": [ "*" ] + } + ], + "run_as": [ "analyst_user" ], + "metadata" : { + "version" : 1 + } + } + - is_false: _shards.failures +--- +"line_143": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: POST + path: "_security/role/my_analyst_role" + refresh: "true" + body: | + { + "cluster": [ "monitor"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": ["manage"] + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ "read" ], + "resources": [ "*" ] + } + ], + "metadata" : { + "version" : 1 + } + } + - is_false: _shards.failures +--- +"line_170": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: POST + path: "_security/user/admin_user" + refresh: "true" + body: | + { + "password": "l0ng-r4nd0m-p@ssw0rd", + "roles": [ "my_admin_role" ], + "full_name": "Eirian Zola", + "metadata": { "intelligence" : 7} + } + - is_false: _shards.failures +--- +"line_184": + - skip: + features: + - default_shards + - stash_in_key + - stash_in_path + - stash_path_replace + - warnings + - do: + raw: + method: POST + path: "_security/user/analyst_user" + refresh: "true" + body: | + { + "password": "l0nger-r4nd0mer-p@ssw0rd", + "roles": [ "my_analyst_role" ], + "full_name": "Monday Jaffe", + "metadata": { "innovation" : 8} + } + - is_false: _shards.failures +""" +} + + File docFile(String fileName, String docContent) { + def file = tempDir.toPath().resolve(fileName).toFile() + file.parentFile.mkdirs() + file.text = docContent + return file + } + + + void docs() { + docFile( + "docs/reference/sql/getting-started.asciidoc", """ +[role="xpack"] +[[sql-getting-started]] +== Getting Started with SQL + +To start using {es-sql}, create +an index with some data to experiment with: + +[source,console] +-------------------------------------------------- +PUT /library/_bulk?refresh +{"index":{"_id": "Leviathan Wakes"}} +{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561} +{"index":{"_id": "Hyperion"}} +{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482} +{"index":{"_id": "Dune"}} +{"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} +-------------------------------------------------- + +And now you can execute SQL using the <>: + +[source,console] +-------------------------------------------------- +POST /_sql?format=txt +{ + "query": "SELECT * FROM library WHERE release_date < '2000-01-01'" +} +-------------------------------------------------- +// TEST[continued] + +Which should return something along the lines of: + +[source,text] +-------------------------------------------------- + author | name | page_count | release_date +---------------+---------------+---------------+------------------------ +Dan Simmons |Hyperion |482 |1989-05-26T00:00:00.000Z +Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\\|/\\\\|/ s/\\+/\\\\+/] +// TESTRESPONSE[non_json] + +You can also use the <>. 
There is a script to start it +shipped in x-pack's bin directory: + +[source,bash] +-------------------------------------------------- +\$ ./bin/elasticsearch-sql-cli +-------------------------------------------------- + +From there you can run the same query: + +[source,sqlcli] +-------------------------------------------------- +sql> SELECT * FROM library WHERE release_date < '2000-01-01'; + author | name | page_count | release_date +---------------+---------------+---------------+------------------------ +Dan Simmons |Hyperion |482 |1989-05-26T00:00:00.000Z +Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +-------------------------------------------------- +""" + ) + docFile( + "docs/ml-update-snapshot.asciidoc", + """ +[role="xpack"] +[[ml-update-snapshot]] += Update model snapshots API +++++ +Update model snapshots +++++ + +Updates certain properties of a snapshot. + +[[ml-update-snapshot-request]] +== {api-request-title} + +`POST _ml/anomaly_detectors//model_snapshots//_update` + +[[ml-update-snapshot-prereqs]] +== {api-prereq-title} + +Requires the `manage_ml` cluster privilege. This privilege is included in the +`machine_learning_admin` built-in role. + +[[ml-update-snapshot-path-parms]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=snapshot-id] + +[[ml-update-snapshot-request-body]] +== {api-request-body-title} + +The following properties can be updated after the model snapshot is created: + +`description`:: +(Optional, string) A description of the model snapshot. + +`retain`:: +(Optional, Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=retain] + + +[[ml-update-snapshot-example]] +== {api-examples-title} + +[source,console] +-------------------------------------------------- +POST +_ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update +{ + "description": "Snapshot 1", + "retain": true +} +-------------------------------------------------- +// TEST[skip:todo] + +When the snapshot is updated, you receive the following results: +[source,js] +---- +{ + "acknowledged": true, + "model": { + "job_id": "it_ops_new_logs", + "timestamp": 1491852978000, + "description": "Snapshot 1", +... + "retain": true + } +} +---- + +""" + ) + + docFile( + "docs/painless-debugging.asciidoc", + """ + +[[painless-debugging]] +=== Painless Debugging + +==== Debug.Explain + +Painless doesn't have a +{wikipedia}/Read%E2%80%93eval%E2%80%93print_loop[REPL] +and while it'd be nice for it to have one day, it wouldn't tell you the +whole story around debugging painless scripts embedded in Elasticsearch because +the data that the scripts have access to or "context" is so important. For now +the best way to debug embedded scripts is by throwing exceptions at choice +places. While you can throw your own exceptions +(`throw new Exception('whatever')`), Painless's sandbox prevents you from +accessing useful information like the type of an object. So Painless has a +utility method, `Debug.explain` which throws the exception for you. For +example, you can use {ref}/search-explain.html[`_explain`] to explore the +context available to a {ref}/query-dsl-script-query.html[script query]. 
+ +[source,console] +--------------------------------------------------------- +PUT /hockey/_doc/1?refresh +{"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} + +POST /hockey/_explain/1 +{ + "query": { + "script": { + "script": "Debug.explain(doc.goals)" + } + } +} +--------------------------------------------------------- +// TEST[s/_explain\\/1/_explain\\/1?error_trace=false/ catch:/painless_explain_error/] +// The test system sends error_trace=true by default for easier debugging so +// we have to override it to get a normal shaped response + +Which shows that the class of `doc.goals` is +`org.elasticsearch.index.fielddata.ScriptDocValues.Longs` by responding with: + +[source,console-result] +--------------------------------------------------------- +{ + "error": { + "type": "script_exception", + "to_string": "[1, 9, 27]", + "painless_class": "org.elasticsearch.index.fielddata.ScriptDocValues.Longs", + "java_class": "org.elasticsearch.index.fielddata.ScriptDocValues\$Longs", + ... + }, + "status": 400 +} +--------------------------------------------------------- +// TESTRESPONSE[s/\\.\\.\\./"script_stack": \$body.error.script_stack, "script": \$body.error.script, "lang": \$body.error.lang, "position": \$body.error.position, "caused_by": \$body.error.caused_by, "root_cause": \$body.error.root_cause, "reason": \$body.error.reason/] + +You can use the same trick to see that `_source` is a `LinkedHashMap` +in the `_update` API: + +[source,console] +--------------------------------------------------------- +POST /hockey/_update/1 +{ + "script": "Debug.explain(ctx._source)" +} +--------------------------------------------------------- +// TEST[continued s/_update\\/1/_update\\/1?error_trace=false/ catch:/painless_explain_error/] + +The response looks like: + +[source,console-result] +--------------------------------------------------------- +{ + "error" : { + "root_cause": ..., + "type": "illegal_argument_exception", + "reason": "failed to execute script", + "caused_by": { + "type": "script_exception", + "to_string": "{gp=[26, 82, 1], last=gaudreau, assists=[17, 46, 0], first=johnny, goals=[9, 27, 1]}", + "painless_class": "java.util.LinkedHashMap", + "java_class": "java.util.LinkedHashMap", + ... + } + }, + "status": 400 +} +--------------------------------------------------------- +// TESTRESPONSE[s/"root_cause": \\.\\.\\./"root_cause": \$body.error.root_cause/] +// TESTRESPONSE[s/\\.\\.\\./"script_stack": \$body.error.caused_by.script_stack, "script": \$body.error.caused_by.script, "lang": \$body.error.caused_by.lang, "position": \$body.error.caused_by.position, "caused_by": \$body.error.caused_by.caused_by, "reason": \$body.error.caused_by.reason/] +// TESTRESPONSE[s/"to_string": ".+"/"to_string": \$body.error.caused_by.to_string/] + +Once you have a class you can go to <> to see a list of +available methods. + +""" + ) + docFile( + "docs/reference/security/authorization/run-as-privilege.asciidoc", + """[role="xpack"] +[[run-as-privilege]] += Submitting requests on behalf of other users + +{es} roles support a `run_as` privilege that enables an authenticated user to +submit requests on behalf of other users. For example, if your external +application is trusted to authenticate users, {es} can authenticate the external +application and use the _run as_ mechanism to issue authorized requests as +other users without having to re-authenticate each user. 
+ +To "run as" (impersonate) another user, the first user (the authenticating user) +must be authenticated by a mechanism that supports run-as delegation. The second +user (the `run_as` user) must be authorized by a mechanism that supports +delegated run-as lookups by username. + +The `run_as` privilege essentially operates like a secondary form of +<>. Delegated authorization applies +to the authenticating user, and the `run_as` privilege applies to the user who +is being impersonated. + +Authenticating user:: +-- +For the authenticating user, the following realms (plus API keys) all support +`run_as` delegation: `native`, `file`, Active Directory, JWT, Kerberos, LDAP and +PKI. + +Service tokens, the {es} Token Service, SAML 2.0, and OIDC 1.0 do not +support `run_as` delegation. +-- + +`run_as` user:: +-- +{es} supports `run_as` for any realm that supports user lookup. +Not all realms support user lookup. Refer to the list of <> +and ensure that the realm you wish to use is configured in a manner that +supports user lookup. + +The `run_as` user must be retrieved from a <> - it is not +possible to run as a +<>, +<> or +<>. +-- + +To submit requests on behalf of other users, you need to have the `run_as` +privilege in your <>. For example, the following request +creates a `my_director` role that grants permission to submit requests on behalf +of `jacknich` or `rdeniro`: + +[source,console] +---- +POST /_security/role/my_director?refresh=true +{ + "cluster": ["manage"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": [ "manage" ] + } + ], + "run_as": [ "jacknich", "rdeniro" ], + "metadata" : { + "version" : 1 + } +} +---- + +To submit a request as another user, you specify the user in the +`es-security-runas-user` request header. For example: + +[source,sh] +---- +curl -H "es-security-runas-user: jacknich" -u es-admin -X GET http://localhost:9200/ +---- + +The `run_as` user passed in through the `es-security-runas-user` header must be +available from a realm that supports delegated authorization lookup by username. +Realms that don't support user lookup can't be used for `run_as` delegation from +other realms. + +For example, JWT realms can authenticate external users specified in JWTs, and +execute requests as a `run_as` user in the `native` realm. {es} will retrieve the +indicated `run_as` user and execute the request as that user using their roles. + +[[run-as-privilege-apply]] +== Apply the `run_as` privilege to roles +You can apply the `run_as` privilege when creating roles with the +<>. Users who are assigned +a role that contains the `run_as` privilege inherit all privileges from their +role, and can also submit requests on behalf of the indicated users. + +NOTE: Roles for the authenticated user and the `run_as` user are not merged. If +a user authenticates without specifying the `run_as` parameter, only the +authenticated user's roles are used. If a user authenticates and their roles +include the `run_as` parameter, only the `run_as` user's roles are used. + +After a user successfully authenticates to {es}, an authorization process determines whether the user behind an incoming request is allowed to run +that request. If the authenticated user has the `run_as` privilege in their list +of permissions and specifies the run-as header, {es} _discards_ the authenticated +user and associated roles. 
It then looks in each of the configured realms in the +realm chain until it finds the username that's associated with the `run_as` user, +and uses those roles to execute any requests. + +Consider an admin role and an analyst role. The admin role has higher privileges, +but a user with this role might also want to submit requests as another user to +test and verify their permissions. + +First, we'll create an admin role named `my_admin_role`. This role has `manage` +<> on the entire cluster, and on a subset of +indices. This role also contains the `run_as` privilege, which enables any user +with this role to submit requests on behalf of the specified `analyst_user`. + +[source,console] +---- +POST /_security/role/my_admin_role?refresh=true +{ + "cluster": ["manage"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": [ "manage" ] + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ "admin", "read" ], + "resources": [ "*" ] + } + ], + "run_as": [ "analyst_user" ], + "metadata" : { + "version" : 1 + } +} +---- + +Next, we'll create an analyst role named `my_analyst_role`, which has more +restricted `monitor` cluster privileges and `manage` privileges on a subset of +indices. + +[source,console] +---- +POST /_security/role/my_analyst_role?refresh=true +{ + "cluster": [ "monitor"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": ["manage"] + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ "read" ], + "resources": [ "*" ] + } + ], + "metadata" : { + "version" : 1 + } +} +---- + +We'll create an administrator user and assign them the role named `my_admin_role`, +which allows this user to submit requests as the `analyst_user`. + +[source,console] +---- +POST /_security/user/admin_user?refresh=true +{ + "password": "l0ng-r4nd0m-p@ssw0rd", + "roles": [ "my_admin_role" ], + "full_name": "Eirian Zola", + "metadata": { "intelligence" : 7} +} +---- + +We can also create an analyst user and assign them the role named +`my_analyst_role`. + +[source,console] +---- +POST /_security/user/analyst_user?refresh=true +{ + "password": "l0nger-r4nd0mer-p@ssw0rd", + "roles": [ "my_analyst_role" ], + "full_name": "Monday Jaffe", + "metadata": { "innovation" : 8} +} +---- + +You can then authenticate to {es} as the `admin_user` or `analyst_user`. The `admin_user` can additionally submit requests on +behalf of the `analyst_user`. The following request authenticates to {es} with a +`Basic` authorization token and submits the request as the `analyst_user`: + +[source,sh] +---- +curl -s -X GET -H "Authorization: Basic YWRtaW5fdXNlcjpsMG5nLXI0bmQwbS1wQHNzdzByZA==" -H "es-security-runas-user: analyst_user" https://localhost:9200/_security/_authenticate +---- + +The response indicates that the `analyst_user` submitted this request, using the +`my_analyst_role` that's assigned to that user. When the `admin_user` submitted +the request, {es} authenticated that user, discarded their roles, and then used +the roles of the `run_as` user. + +[source,sh] +---- +{"username":"analyst_user","roles":["my_analyst_role"],"full_name":"Monday Jaffe","email":null, +"metadata":{"innovation":8},"enabled":true,"authentication_realm":{"name":"native", +"type":"native"},"lookup_realm":{"name":"native","type":"native"},"authentication_type":"realm"} +% +---- + +The `authentication_realm` and `lookup_realm` in the response both specify +the `native` realm because both the `admin_user` and `analyst_user` are from +that realm. 
If the two users are in different realms, the values for +`authentication_realm` and `lookup_realm` are different (such as `pki` and +`native`). +""" + ) + + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java deleted file mode 100644 index 534134e78d40b..0000000000000 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal.doc; - -import org.gradle.api.InvalidUserDataException; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import static org.elasticsearch.gradle.internal.doc.RestTestsFromSnippetsTask.replaceBlockQuote; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class RestTestFromSnippetsTaskTests { - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - @Test - public void testInvalidBlockQuote() { - String input = "\"foo\": \"\"\"bar\""; - expectedEx.expect(InvalidUserDataException.class); - expectedEx.expectMessage("Invalid block quote starting at 7 in:\n" + input); - replaceBlockQuote(input); - } - - @Test - public void testSimpleBlockQuote() { - assertEquals("\"foo\": \"bort baz\"", replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\"")); - } - - @Test - public void testMultipleBlockQuotes() { - assertEquals( - "\"foo\": \"bort baz\", \"bar\": \"other\"", - replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\", \"bar\": \"\"\"other\"\"\"") - ); - } - - @Test - public void testEscapingInBlockQuote() { - assertEquals("\"foo\": \"bort\\\" baz\"", replaceBlockQuote("\"foo\": \"\"\"bort\" baz\"\"\"")); - assertEquals("\"foo\": \"bort\\n baz\"", replaceBlockQuote("\"foo\": \"\"\"bort\n baz\"\"\"")); - } - - @Test - public void testIsDocWriteRequest() { - assertTrue((boolean) RestTestsFromSnippetsTask.shouldAddShardFailureCheck("doc-index/_search")); - assertFalse((boolean) RestTestsFromSnippetsTask.shouldAddShardFailureCheck("_cat")); - assertFalse((boolean) RestTestsFromSnippetsTask.shouldAddShardFailureCheck("_ml/datafeeds/datafeed-id/_preview")); - } -} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java deleted file mode 100644 index 0acae6ca03297..0000000000000 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.gradle.internal.doc; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class SnippetsTaskTests { - - @Test - public void testMatchSource() { - SnippetsTask.Source source = SnippetsTask.matchSource("[source,console]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertNull(source.getName()); - - source = SnippetsTask.matchSource("[source,console,id=snippet-name-1]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[source, console, id=snippet-name-1]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[source,console,attr=5,id=snippet-name-1,attr2=6]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[source,console, attr=5, id=snippet-name-1, attr2=6]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[\"source\",\"console\",id=\"snippet-name-1\"]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[source,console,id=\"snippet-name-1\"]"); - assertTrue(source.getMatches()); - assertEquals("console", source.getLanguage()); - assertEquals("snippet-name-1", source.getName()); - - source = SnippetsTask.matchSource("[source.merge.styled,esql]"); - assertTrue(source.getMatches()); - assertEquals("esql", source.getLanguage()); - - source = SnippetsTask.matchSource("[source.merge.styled,foo-bar]"); - assertTrue(source.getMatches()); - assertEquals("foo-bar", source.getLanguage()); - } -}
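A minimal, self-contained sketch of the triple-quote rewriting that the deleted RestTestFromSnippetsTaskTests above pinned down: each `"""..."""` block becomes a regular JSON string with embedded quotes and newlines escaped, and an unterminated block is an error. This is a reconstruction from the asserted inputs and outputs, not the removed `replaceBlockQuote` implementation; the class name `BlockQuoteSketch` and the use of `IllegalArgumentException` in place of Gradle's `InvalidUserDataException` are assumptions made to keep it dependency-free.

public class BlockQuoteSketch {
    // Rewrites CONSOLE-style """ block quotes into plain JSON strings, matching
    // the behavior asserted by the deleted tests. The removed task threw
    // InvalidUserDataException; IllegalArgumentException stands in here.
    static String replaceBlockQuote(String snippet) {
        int start = snippet.indexOf("\"\"\"");
        if (start < 0) {
            return snippet; // nothing to rewrite
        }
        int end = snippet.indexOf("\"\"\"", start + 3);
        if (end < 0) {
            throw new IllegalArgumentException("Invalid block quote starting at " + start + " in:\n" + snippet);
        }
        String body = snippet.substring(start + 3, end)
            .replace("\"", "\\\"")  // escape embedded double quotes
            .replace("\n", "\\n");  // collapse literal newlines
        // Recurse to handle any further block quotes after this one.
        return snippet.substring(0, start) + '"' + body + '"' + replaceBlockQuote(snippet.substring(end + 3));
    }

    public static void main(String[] args) {
        // Mirrors testSimpleBlockQuote and testMultipleBlockQuotes:
        System.out.println(replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\""));
        // -> "foo": "bort baz"
        System.out.println(replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\", \"bar\": \"\"\"other\"\"\""));
        // -> "foo": "bort baz", "bar": "other"
    }
}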
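Likewise for the deleted SnippetsTaskTests: a hedged sketch of the `[source,...]` header parsing that `testMatchSource` exercised. The regular expressions below are reconstructed from the asserted cases and are an assumption, not the removed `SnippetsTask.matchSource` pattern; the class and record names are made up for the example.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MatchSourceSketch {
    // Accepts headers like [source,console], [source.merge.styled,esql] and
    // ["source","console",id="snippet-name-1"]; group(1) captures the language,
    // group(2) whatever attribute list remains (possibly empty).
    private static final Pattern SOURCE = Pattern.compile("^\\[\\s*\"?source\"?(?:\\.[^,\\]]+)?\\s*,\\s*\"?([-\\w]+)\"?(.*)]$");
    // Extracts an optional id=... attribute, quoted or bare, from the remainder.
    private static final Pattern ID = Pattern.compile("id=\"?([-\\w]+)\"?");

    record Source(boolean matches, String language, String name) {}

    static Source matchSource(String line) {
        Matcher m = SOURCE.matcher(line);
        if (m.matches() == false) {
            return new Source(false, null, null);
        }
        Matcher id = ID.matcher(m.group(2));
        return new Source(true, m.group(1), id.find() ? id.group(1) : null);
    }

    public static void main(String[] args) {
        // A few of the cases the deleted test asserted:
        System.out.println(matchSource("[source,console]"));                     // console, no name
        System.out.println(matchSource("[source, console, id=snippet-name-1]")); // console, snippet-name-1
        System.out.println(matchSource("[source.merge.styled,esql]"));           // esql, no name
    }
}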