diff --git a/.ci/jobs.t/elastic+elasticsearch+{branch}+docker-test-ramdisk.yml b/.ci/jobs.t/elastic+elasticsearch+{branch}+docker-test-ramdisk.yml
deleted file mode 100644
index d920109bdbd90..0000000000000
--- a/.ci/jobs.t/elastic+elasticsearch+{branch}+docker-test-ramdisk.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-- job:
-    name: elastic+elasticsearch+%BRANCH%+docker-test-ramdisk
-    workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+docker-test
-    display-name: "elastic / elasticsearch # %BRANCH% - docker test ramdisk (experimental)"
-    description: "Experimental test job to shake out issues with Docker in CI\n"
-    node: "(debian-10 || sles-12 || debian-8 || sles-15) && immutable"
-    triggers: []
-    builders:
-      - shell: |
-          #!/usr/local/bin/runbld --redirect-stderr --last-good-commit elastic+elasticsearch+%BRANCH%+git+push
-          .ci/build.sh -Dbwc.checkout.align=true -p test/fixtures composeUp
diff --git a/.eclipseformat.xml b/.eclipseformat.xml
index ebda0af1503e5..9e913b41926a8 100644
--- a/.eclipseformat.xml
+++ b/.eclipseformat.xml
@@ -62,7 +62,7 @@
-
+
@@ -129,7 +129,7 @@
-
+
@@ -242,7 +242,7 @@
-
+
diff --git a/.gitignore b/.gitignore
index e3f75ba3d1fb4..ec394ab09063a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,8 @@
 *.ipr
 *.iws
 build-idea/
+# These files are generated in the main tree by IntelliJ
+benchmarks/src/main/generated/*
 # eclipse files
 .project
@@ -42,7 +44,7 @@
 html_docs
 /tmp/
 eclipse-build
-# projects using testfixtures
+# projects using testfixtures
 testfixtures_shared/
 # These are generated from .ci/jobs.t
diff --git a/build.gradle b/build.gradle
index b03bb7e5a3794..343ee3df31452 100644
--- a/build.gradle
+++ b/build.gradle
@@ -106,6 +106,7 @@ subprojects {
   // is greater than the number of unformatted projects, this can be
   // switched to an exclude list, and eventualy removed completely.
   def projectPathsToFormat = [
+    ':build-tools',
     ':x-pack:plugin:enrich'
   ]
@@ -114,6 +115,10 @@ subprojects {
   spotless {
     java {
+      // Normally this isn't necessary, but we have Java sources in
+      // non-standard places
+      target '**/*.java'
+      removeUnusedImports()
       eclipse().configFile rootProject.file('.eclipseformat.xml')
       trimTrailingWhitespace()
diff --git a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java
index 27cde3b6e1bf7..8895e4f6077f6 100644
--- a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java
+++ b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java
@@ -65,14 +65,14 @@ public static void main(String[] args) throws Exception {
         }
         Path inputDir = Paths.get(args[0]);
-        try (Reaper reaper = new Reaper(inputDir)){
+        try (Reaper reaper = new Reaper(inputDir)) {
             System.in.read();
             reaper.reap();
         }
     }
     private void reap() {
-        try (Stream stream = Files.list(inputDir)){
+        try (Stream stream = Files.list(inputDir)) {
            final List inputFiles = stream.filter(p -> p.getFileName().toString().endsWith(".cmd")).collect(Collectors.toList());
            for (Path inputFile : inputFiles) {
@@ -118,7 +118,7 @@ private void delete(Path toDelete) {
     @Override
     public void close() {
         if (failed == false) {
-            try (Stream stream = Files.walk(inputDir)){
+            try (Stream stream = Files.walk(inputDir)) {
                 stream.sorted(Comparator.reverseOrder()).forEach(this::delete);
             } catch (IOException e) {
                 throw new UncheckedIOException(e);
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/elasticsearch/gradle/BwcVersions.java
index b5f1aa5f551dc..7c272ac35c667 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/BwcVersions.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/BwcVersions.java
@@ -109,15 +109,20 @@ public BwcVersions(List versionLines) {
     }
     protected BwcVersions(List versionLines, Version currentVersionProperty) {
-        this(versionLines.stream()
-            .map(LINE_PATTERN::matcher)
-            .filter(Matcher::matches)
-            .map(match -> new Version(
-                Integer.parseInt(match.group(1)),
-                Integer.parseInt(match.group(2)),
-                Integer.parseInt(match.group(3))
-            ))
-            .collect(Collectors.toCollection(TreeSet::new)), currentVersionProperty);
+        this(
+            versionLines.stream()
+                .map(LINE_PATTERN::matcher)
+                .filter(Matcher::matches)
+                .map(
+                    match -> new Version(
+                        Integer.parseInt(match.group(1)),
+                        Integer.parseInt(match.group(2)),
+                        Integer.parseInt(match.group(3))
+                    )
+                )
+                .collect(Collectors.toCollection(TreeSet::new)),
+            currentVersionProperty
+        );
     }
     // for testkit tests, until BwcVersions is extracted into an extension
@@ -140,8 +145,10 @@ public BwcVersions(SortedSet allVersions, Version currentVersionPropert
         Map unreleased = new HashMap<>();
         for (Version unreleasedVersion : getUnreleased()) {
-            unreleased.put(unreleasedVersion,
-                new UnreleasedVersionInfo(unreleasedVersion, getBranchFor(unreleasedVersion), getGradleProjectPathFor(unreleasedVersion)));
+            unreleased.put(
+                unreleasedVersion,
+                new UnreleasedVersionInfo(unreleasedVersion, getBranchFor(unreleasedVersion), getGradleProjectPathFor(unreleasedVersion))
+            );
         }
         this.unreleased = Collections.unmodifiableMap(unreleased);
     }
@@ -149,18 +156,18 @@ public BwcVersions(SortedSet allVersions, Version currentVersionPropert
     private void assertNoOlderThanTwoMajors() {
         Set majors = groupByMajor.keySet();
         if (majors.size() != 2 && currentVersion.getMinor() != 0 && currentVersion.getRevision()
!= 0) { - throw new IllegalStateException( - "Expected exactly 2 majors in parsed versions but found: " + majors - ); + throw new IllegalStateException("Expected exactly 2 majors in parsed versions but found: " + majors); } } private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) { if (currentVersionProperty.equals(currentVersion) == false) { throw new IllegalStateException( - "Parsed versions latest version does not match the one configured in build properties. " + - "Parsed latest version is " + currentVersion + " but the build has " + - currentVersionProperty + "Parsed versions latest version does not match the one configured in build properties. " + + "Parsed latest version is " + + currentVersion + + " but the build has " + + currentVersionProperty ); } } @@ -175,12 +182,7 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { public void forPreviousUnreleased(Consumer consumer) { List collect = getUnreleased().stream() .filter(version -> version.equals(currentVersion) == false) - .map(version -> new UnreleasedVersionInfo( - version, - getBranchFor(version), - getGradleProjectPathFor(version) - ) - ) + .map(version -> new UnreleasedVersionInfo(version, getBranchFor(version), getGradleProjectPathFor(version))) .collect(Collectors.toList()); collect.forEach(uvi -> consumer.accept(uvi)); @@ -196,22 +198,18 @@ private String getGradleProjectPathFor(Version version) { Map> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor(); if (version.getRevision() == 0) { - List unreleasedStagedOrMinor = getUnreleased().stream() - .filter(v -> v.getRevision() == 0) - .collect(Collectors.toList()); + List unreleasedStagedOrMinor = getUnreleased().stream().filter(v -> v.getRevision() == 0).collect(Collectors.toList()); if (unreleasedStagedOrMinor.size() > 2) { if (unreleasedStagedOrMinor.get(unreleasedStagedOrMinor.size() - 2).equals(version)) { return ":distribution:bwc:minor"; - } else{ + } else { return ":distribution:bwc:staged"; } } else { return ":distribution:bwc:minor"; } } else { - if (releasedMajorGroupedByMinor - .getOrDefault(version.getMinor(), emptyList()) - .contains(version)) { + if (releasedMajorGroupedByMinor.getOrDefault(version.getMinor(), emptyList()).contains(version)) { return ":distribution:bwc:bugfix"; } else { return ":distribution:bwc:maintenance"; @@ -229,7 +227,7 @@ private String getBranchFor(Version version) { return "master"; case ":distribution:bwc:minor": // The .x branch will always point to the latest minor (for that major), so a "minor" project will be on the .x branch - // unless there is more recent (higher) minor. + // unless there is more recent (higher) minor. 
final Version latestInMajor = getLatestVersionByKey(groupByMajor, version.getMajor()); if (latestInMajor.getMinor() == version.getMinor()) { return version.getMajor() + ".x"; @@ -279,23 +277,16 @@ public List getUnreleased() { } } - return unmodifiableList( - unreleased.stream() - .sorted() - .distinct() - .collect(Collectors.toList()) - ); + return unmodifiableList(unreleased.stream().sorted().distinct().collect(Collectors.toList())); } private Version getLatestInMinor(int major, int minor) { - return groupByMajor.get(major).stream() - .filter(v -> v.getMinor() == minor) - .max(Version::compareTo) - .orElse(null); + return groupByMajor.get(major).stream().filter(v -> v.getMinor() == minor).max(Version::compareTo).orElse(null); } private Version getLatestVersionByKey(Map> groupByMajor, int key) { - return groupByMajor.getOrDefault(key, emptyList()).stream() + return groupByMajor.getOrDefault(key, emptyList()) + .stream() .max(Version::compareTo) .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection")); } @@ -307,11 +298,9 @@ private Map> getReleasedMajorGroupedByMinor() { final Map> groupByMinor; if (currentMajorVersions.size() == 1) { // Current is an unreleased major: x.0.0 so we have to look for other unreleased versions in the previous major - groupByMinor = previousMajorVersions.stream() - .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); + groupByMinor = previousMajorVersions.stream().collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); } else { - groupByMinor = currentMajorVersions.stream() - .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); + groupByMinor = currentMajorVersions.stream().collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); } return groupByMinor; } @@ -321,8 +310,9 @@ public void compareToAuthoritative(List authoritativeReleasedVersions) notReallyReleased.removeAll(authoritativeReleasedVersions); if (notReallyReleased.isEmpty() == false) { throw new IllegalStateException( - "out-of-date released versions" + - "\nFollowing versions are not really released, but the build thinks they are: " + notReallyReleased + "out-of-date released versions" + + "\nFollowing versions are not really released, but the build thinks they are: " + + notReallyReleased ); } @@ -330,17 +320,19 @@ public void compareToAuthoritative(List authoritativeReleasedVersions) incorrectlyConsideredUnreleased.retainAll(getUnreleased()); if (incorrectlyConsideredUnreleased.isEmpty() == false) { throw new IllegalStateException( - "out-of-date released versions" + - "\nBuild considers versions unreleased, " + - "but they are released according to an authoritative source: " + incorrectlyConsideredUnreleased + - "\nThe next versions probably needs to be added to Version.java (CURRENT doesn't count)." + "out-of-date released versions" + + "\nBuild considers versions unreleased, " + + "but they are released according to an authoritative source: " + + incorrectlyConsideredUnreleased + + "\nThe next versions probably needs to be added to Version.java (CURRENT doesn't count)." 
); } } private List getReleased() { List unreleased = getUnreleased(); - return groupByMajor.values().stream() + return groupByMajor.values() + .stream() .flatMap(Collection::stream) .filter(each -> unreleased.contains(each) == false) .collect(Collectors.toList()); @@ -348,10 +340,7 @@ private List getReleased() { public List getIndexCompatible() { return unmodifiableList( - Stream.concat( - groupByMajor.get(currentVersion.getMajor() - 1).stream(), - groupByMajor.get(currentVersion.getMajor()).stream() - ) + Stream.concat(groupByMajor.get(currentVersion.getMajor() - 1).stream(), groupByMajor.get(currentVersion.getMajor()).stream()) .collect(Collectors.toList()) ); } @@ -361,10 +350,7 @@ public List getWireCompatible() { List prevMajors = groupByMajor.get(currentVersion.getMajor() - 1); int minor = prevMajors.get(prevMajors.size() - 1).getMinor(); - for (int i = prevMajors.size() - 1; - i > 0 && prevMajors.get(i).getMinor() == minor; - i-- - ) { + for (int i = prevMajors.size() - 1; i > 0 && prevMajors.get(i).getMinor() == minor; i--) { wireCompat.add(prevMajors.get(i)); } wireCompat.addAll(groupByMajor.get(currentVersion.getMajor())); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ConcatFilesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ConcatFilesTask.java index 7da6381cc2a24..ac5f030e7e8a4 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ConcatFilesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ConcatFilesTask.java @@ -55,7 +55,9 @@ public void setFiles(FileTree files) { } @InputFiles - public FileTree getFiles() { return files; } + public FileTree getFiles() { + return files; + } public void setHeaderLine(String headerLine) { this.headerLine = headerLine; @@ -63,7 +65,9 @@ public void setHeaderLine(String headerLine) { @Input @Optional - public String getHeaderLine() { return headerLine; } + public String getHeaderLine() { + return headerLine; + } public void setTarget(File target) { this.target = target; @@ -77,10 +81,7 @@ public File getTarget() { @TaskAction public void concatFiles() throws IOException { if (getHeaderLine() != null) { - Files.write( - getTarget().toPath(), - (getHeaderLine() + '\n').getBytes(StandardCharsets.UTF_8) - ); + Files.write(getTarget().toPath(), (getHeaderLine() + '\n').getBytes(StandardCharsets.UTF_8)); } // To remove duplicate lines @@ -88,9 +89,7 @@ public void concatFiles() throws IOException { for (File f : getFiles()) { uniqueLines.addAll(Files.readAllLines(f.toPath(), StandardCharsets.UTF_8)); } - Files.write( - getTarget().toPath(), uniqueLines, StandardCharsets.UTF_8, StandardOpenOption.APPEND - ); + Files.write(getTarget().toPath(), uniqueLines, StandardCharsets.UTF_8, StandardOpenOption.APPEND); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index 861d5afcaf65c..1a520341f12b1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -105,8 +105,10 @@ void setupDistributions(Project project) { if (distribution.getType().shouldExtract()) { // for the distribution extracted, add a root level task that does the extraction, and depend on that // extracted configuration as an artifact consisting of the extracted distribution directory - dependencies.add(distribution.getExtracted().configuration.getName(), - projectDependency(project, ":", 
configName("extracted_elasticsearch", distribution))); + dependencies.add( + distribution.getExtracted().configuration.getName(), + projectDependency(project, ":", configName("extracted_elasticsearch", distribution)) + ); // ensure a root level download task exists setupRootDownload(project.getRootProject(), distribution); } @@ -139,7 +141,7 @@ private void setupRootDownload(Project rootProject, ElasticsearchDistribution di TaskProvider extractTask = rootProject.getTasks().register(extractTaskName, Sync.class, syncTask -> { syncTask.dependsOn(downloadConfig); syncTask.into(extractDir); - syncTask.from((Callable)() -> { + syncTask.from((Callable) () -> { File archiveFile = archiveGetter.get(); String archivePath = archiveFile.toString(); if (archivePath.endsWith(".zip")) { @@ -150,9 +152,12 @@ private void setupRootDownload(Project rootProject, ElasticsearchDistribution di throw new IllegalStateException("unexpected file extension on [" + archivePath + "]"); }); }); - rootProject.getArtifacts().add(extractedConfigName, - rootProject.getLayout().getProjectDirectory().dir(extractDir), - artifact -> artifact.builtBy(extractTask)); + rootProject.getArtifacts() + .add( + extractedConfigName, + rootProject.getLayout().getProjectDirectory().dir(extractDir), + artifact -> artifact.builtBy(extractTask) + ); } } @@ -219,7 +224,6 @@ private Object dependencyNotation(Project project, ElasticsearchDistribution dis return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; } - Version distroVersion = Version.fromString(distribution.getVersion()); String extension = distribution.getType().toString(); String classifier = ":x86_64"; @@ -293,9 +297,16 @@ private static String distributionProjectName(ElasticsearchDistribution distribu } private static String configName(String prefix, ElasticsearchDistribution distribution) { - return prefix + "_" + distribution.getVersion() + "_" + distribution.getType() + "_" + - (distribution.getPlatform() == null ? "" : distribution.getPlatform() + "_") - + distribution.getFlavor() + (distribution.getBundledJdk() ? "" : "_nojdk"); + return String.format( + Locale.ROOT, + "%s_%s_%s_%s%s%s", + prefix, + distribution.getVersion(), + distribution.getType(), + distribution.getPlatform() == null ? "" : distribution.getPlatform() + "_", + distribution.getFlavor(), + distribution.getBundledJdk() ? 
"" : "_nojdk" + ); } private static String capitalize(String s) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index bf482edb250dd..ff6b53fa29447 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -120,8 +120,12 @@ public String toString() { private final Property flavor; private final Property bundledJdk; - ElasticsearchDistribution(String name, ObjectFactory objectFactory, Configuration fileConfiguration, - Configuration extractedConfiguration) { + ElasticsearchDistribution( + String name, + ObjectFactory objectFactory, + Configuration fileConfiguration, + Configuration extractedConfiguration + ) { this.name = name; this.configuration = fileConfiguration; this.version = objectFactory.property(String.class).convention(VersionProperties.getElasticsearch()); @@ -188,8 +192,9 @@ public Extracted getExtracted() { case DEB: case DOCKER: case RPM: - throw new UnsupportedOperationException("distribution type [" + getType() + "] for " + - "elasticsearch distribution [" + name + "] cannot be extracted"); + throw new UnsupportedOperationException( + "distribution type [" + getType() + "] for " + "elasticsearch distribution [" + name + "] cannot be extracted" + ); default: return extracted; @@ -217,15 +222,18 @@ void finalizeValues() { if (getType() == Type.INTEG_TEST_ZIP) { if (platform.getOrNull() != null) { throw new IllegalArgumentException( - "platform not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]"); + "platform not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]" + ); } if (flavor.getOrNull() != null) { throw new IllegalArgumentException( - "flavor [" + flavor.get() + "] not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]"); + "flavor [" + flavor.get() + "] not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]" + ); } if (bundledJdk.getOrNull() != null) { throw new IllegalArgumentException( - "bundledJdk not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]"); + "bundledJdk not allowed for elasticsearch distribution [" + name + "] of type [integ_test_zip]" + ); } return; } @@ -237,8 +245,9 @@ void finalizeValues() { } } else { // rpm, deb or docker if (platform.isPresent()) { - throw new IllegalArgumentException("platform not allowed for elasticsearch distribution [" - + name + "] of type [" + getType() + "]"); + throw new IllegalArgumentException( + "platform not allowed for elasticsearch distribution [" + name + "] of type [" + getType() + "]" + ); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java index dff60f7693bbd..4b30848ac4235 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTask.java @@ -82,8 +82,8 @@ public void setOutputDir(File outputDir) { public File copy(String resource) { if (getState().getExecuted() || getState().getExecuting()) { - throw new GradleException("buildResources can't be configured after the task ran. 
" + - "Make sure task is not used after configuration time" + throw new GradleException( + "buildResources can't be configured after the task ran. " + "Make sure task is not used after configuration time" ); } resources.add(resource); @@ -96,19 +96,18 @@ public void doExport() { setDidWork(false); throw new StopExecutionException(); } - resources.stream().parallel() - .forEach(resourcePath -> { - Path destination = outputDir.get().file(resourcePath).getAsFile().toPath(); - try (InputStream is = getClass().getClassLoader().getResourceAsStream(resourcePath)) { - Files.createDirectories(destination.getParent()); - if (is == null) { - throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found"); - } - Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING); - } catch (IOException e) { - throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e); + resources.stream().parallel().forEach(resourcePath -> { + Path destination = outputDir.get().file(resourcePath).getAsFile().toPath(); + try (InputStream is = getClass().getClassLoader().getResourceAsStream(resourcePath)) { + Files.createDirectories(destination.getParent()); + if (is == null) { + throw new GradleException("Can't export `" + resourcePath + "` from build-tools: not found"); } - }); + Files.copy(is, destination, StandardCopyOption.REPLACE_EXISTING); + } catch (IOException e) { + throw new GradleException("Can't write resource `" + resourcePath + "` to " + destination, e); + } + }); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/FileSupplier.java b/buildSrc/src/main/java/org/elasticsearch/gradle/FileSupplier.java index d28afe0c41dd0..a2d85d75a75d3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/FileSupplier.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/FileSupplier.java @@ -3,5 +3,4 @@ import java.io.File; import java.util.function.Supplier; -public interface FileSupplier extends Supplier { -} +public interface FileSupplier extends Supplier {} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java index ca429b67bdd0d..12f0a8e8e9c76 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java @@ -35,8 +35,7 @@ public class Jdk implements Buildable, Iterable { private static final List ALLOWED_VENDORS = List.of("adoptopenjdk", "openjdk"); - static final Pattern VERSION_PATTERN = - Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?"); + static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?"); private static final List ALLOWED_PLATFORMS = Collections.unmodifiableList(Arrays.asList("darwin", "linux", "windows", "mac")); private final String name; @@ -87,7 +86,8 @@ public String getPlatform() { public void setPlatform(String platform) { if (ALLOWED_PLATFORMS.contains(platform) == false) { throw new IllegalArgumentException( - "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS); + "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS + ); } this.platform.set(platform); } @@ -135,7 +135,7 @@ void finalizeValues() { } version.finalizeValue(); platform.finalizeValue(); - vendor.finalizeValue();; + vendor.finalizeValue(); } @Override diff --git 
a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index 59b2aa98d4f36..b1895727949ef 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -56,9 +56,7 @@ public class JdkDownloadPlugin implements Plugin { @Override public void apply(Project project) { - NamedDomainObjectContainer jdksContainer = project.container(Jdk.class, name -> - new Jdk(name, project) - ); + NamedDomainObjectContainer jdksContainer = project.container(Jdk.class, name -> new Jdk(name, project)); project.getExtensions().add(CONTAINER_NAME, jdksContainer); project.afterEvaluate(p -> { @@ -134,7 +132,8 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S Locale.ROOT, "adoptopenjdk/OpenJDK%sU-jdk_x64_[module]_hotspot_[revision]_%s.[ext]", jdkMajor, - jdkBuild); + jdkBuild + ); ivyRepo.patternLayout(layout -> layout.artifact(pattern)); ivyRepo.content(content -> content.includeGroup("adoptopenjdk")); }); @@ -146,8 +145,11 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S ivyRepo.setName(repoName); ivyRepo.setUrl("https://download.oracle.com"); ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> layout.artifact( - "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.patternLayout( + layout -> layout.artifact( + "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" + ) + ); ivyRepo.content(content -> content.includeGroup("openjdk")); }); } else { @@ -156,8 +158,11 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S ivyRepo.setName(repoName); ivyRepo.setUrl("https://download.oracle.com"); ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> - layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.patternLayout( + layout -> layout.artifact( + "java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" + ) + ); ivyRepo.content(content -> content.includeGroup("openjdk")); }); } @@ -173,15 +178,17 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S jdkConfig = configurations.create(remoteConfigName); configurations.create(localConfigName); } - String platformDep = platform.equals("darwin") || platform.equals("osx") ? - (vendor.equals("adoptopenjdk") ? "mac" : "osx") : platform; + String platformDep = platform.equals("darwin") || platform.equals("osx") + ? (vendor.equals("adoptopenjdk") ? "mac" : "osx") + : platform; String extension = platform.equals("windows") ? 
"zip" : "tar.gz"; String jdkDep = vendor + ":" + platformDep + ":" + jdkVersion + "@" + extension; rootProject.getDependencies().add(configName(vendor, version, platform), jdkDep); // add task for extraction - final Provider extractPath = - rootProject.getLayout().getBuildDirectory().dir("jdks/" + vendor + "-" + jdkVersion + "_" + platform); + final Provider extractPath = rootProject.getLayout() + .getBuildDirectory() + .dir("jdks/" + vendor + "-" + jdkVersion + "_" + platform); // delay resolving jdkConfig until runtime Supplier jdkArchiveGetter = jdkConfig::getSingleFile; @@ -208,7 +215,8 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S String[] pathSegments = details.getRelativePath().getSegments(); int index = 0; for (; index < pathSegments.length; index++) { - if (pathSegments[index].matches("jdk-.*")) break; + if (pathSegments[index].matches("jdk-.*")) + break; } assert index + 1 <= pathSegments.length; String[] newPathSegments = Arrays.copyOfRange(pathSegments, index + 1, pathSegments.length); @@ -235,39 +243,38 @@ public void execute(Task t) { extractTask = rootProject.getTasks().register(extractTaskName, SymbolicLinkPreservingUntarTask.class, task -> { task.getTarFile().set(jdkConfiguration.getSingleFile()); task.getExtractPath().set(extractPath); - task.setTransform( - name -> { - /* - * We want to remove up to the and including the jdk-.* relative paths. That is a JDK archive is structured as: - * jdk-12.0.1/ - * jdk-12.0.1/Contents - * ... - * - * and we want to remove the leading jdk-12.0.1. Note however that there could also be a leading ./ as in - * ./ - * ./jdk-12.0.1/ - * ./jdk-12.0.1/Contents - * - * so we account for this and search the path components until we find the jdk-12.0.1, and strip the leading - * components. - */ - final Path entryName = Paths.get(name); - int index = 0; - for (; index < entryName.getNameCount(); index++) { - if (entryName.getName(index).toString().matches("jdk-.*")) break; - } - if (index + 1 >= entryName.getNameCount()) { - // this happens on the top-level directories in the archive, which we are removing - return null; + task.setTransform(name -> { + /* + * We want to remove up to the and including the jdk-.* relative paths. That is a JDK archive is structured as: + * jdk-12.0.1/ + * jdk-12.0.1/Contents + * ... + * + * and we want to remove the leading jdk-12.0.1. Note however that there could also be a leading ./ as in + * ./ + * ./jdk-12.0.1/ + * ./jdk-12.0.1/Contents + * + * so we account for this and search the path components until we find the jdk-12.0.1, and strip the leading + * components. 
+ */ + final Path entryName = Paths.get(name); + int index = 0; + for (; index < entryName.getNameCount(); index++) { + if (entryName.getName(index).toString().matches("jdk-.*")) { + break; } - // finally remove the top-level directories from the output path - return entryName.subpath(index + 1, entryName.getNameCount()); - }); + } + if (index + 1 >= entryName.getNameCount()) { + // this happens on the top-level directories in the archive, which we are removing + return null; + } + // finally remove the top-level directories from the output path + return entryName.subpath(index + 1, entryName.getNameCount()); + }); }); } - rootProject.getArtifacts().add(localConfigName, - extractPath, - artifact -> artifact.builtBy(extractTask)); + rootProject.getArtifacts().add(localConfigName, extractPath, artifact -> artifact.builtBy(extractTask)); } private static String configName(String vendor, String version, String platform) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java index a6e8d5cbbe8bd..dcfce3829376c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java @@ -110,14 +110,17 @@ public Collection values() { @Override public Set> entrySet() { - return delegate.entrySet().stream() + return delegate.entrySet() + .stream() .peek(this::validate) - .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().getValue())).entrySet(); + .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().getValue())) + .entrySet(); } @Override public List getNormalizedCollection() { - return delegate.values().stream() + return delegate.values() + .stream() .peek(this::validate) .filter(entry -> entry.getNormalization() != PropertyNormalization.IGNORE_VALUE) .map(entry -> normalizationMapper == null ? 
entry : normalizationMapper.apply(entry.getKey(), entry.getValue())) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java index 8a1dfe16de28f..be77c68dbc1cf 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java @@ -44,7 +44,9 @@ public abstract class LoggingOutputStream extends OutputStream { @Override public void write(final int b) throws IOException { - if (b == 0) return; + if (b == 0) { + return; + } if (b == '\n') { // always flush with newlines instead of adding to the buffer flush(); @@ -69,7 +71,9 @@ public void write(final int b) throws IOException { @Override public void flush() { - if (end == start) return; + if (end == start) { + return; + } logLine(new String(buffer, start, end - start)); start = end; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperPlugin.java index 2d37e30391cfa..fb6edfc87d399 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperPlugin.java @@ -38,10 +38,13 @@ public void apply(Project project) { project.getPlugins().apply(GlobalBuildInfoPlugin.class); - Path inputDir = project.getRootDir().toPath().resolve(".gradle") - .resolve("reaper").resolve("build-" + ProcessHandle.current().pid()); - ReaperService service = project.getExtensions().create("reaper", ReaperService.class, - project, project.getBuildDir().toPath(), inputDir); + Path inputDir = project.getRootDir() + .toPath() + .resolve(".gradle") + .resolve("reaper") + .resolve("build-" + ProcessHandle.current().pid()); + ReaperService service = project.getExtensions() + .create("reaper", ReaperService.class, project, project.getBuildDir().toPath(), inputDir); project.getGradle().buildFinished(result -> service.shutdown()); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperService.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperService.java index f60fb5b2816ac..f7e2d74c9a205 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperService.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ReaperService.java @@ -59,8 +59,8 @@ public ReaperService(Project project, Path buildDir, Path inputDir) { */ public void registerPid(String serviceId, long pid) { String[] killPidCommand = OS.conditional() - .onWindows(() -> new String[]{"Taskill", "/F", "/PID", String.valueOf(pid)}) - .onUnix(() -> new String[]{"kill", "-9", String.valueOf(pid)}) + .onWindows(() -> new String[] { "Taskill", "/F", "/PID", String.valueOf(pid) }) + .onUnix(() -> new String[] { "kill", "-9", String.valueOf(pid) }) .supply(); registerCommand(serviceId, killPidCommand); } @@ -79,9 +79,7 @@ public void registerCommand(String serviceId, String... command) { } private Path getCmdFile(String serviceId) { - return inputDir.resolve( - serviceId.replaceAll("[^a-zA-Z0-9]","-") + ".cmd" - ); + return inputDir.resolve(serviceId.replaceAll("[^a-zA-Z0-9]", "-") + ".cmd"); } public void unregister(String serviceId) { @@ -99,8 +97,7 @@ void shutdown() { reaperProcess.getOutputStream().close(); logger.info("Waiting for reaper to exit normally"); if (reaperProcess.waitFor() != 0) { - throw new GradleException("Reaper process failed. 
Check log at " - + inputDir.resolve("error.log") + " for details"); + throw new GradleException("Reaper process failed. Check log at " + inputDir.resolve("error.log") + " for details"); } } catch (Exception e) { throw new RuntimeException(e); @@ -120,9 +117,12 @@ private synchronized void ensureReaperStarted() { // start the reaper ProcessBuilder builder = new ProcessBuilder( Jvm.current().getJavaExecutable().toString(), // same jvm as gradle - "-Xms4m", "-Xmx16m", // no need for a big heap, just need to read some files and execute - "-jar", jarPath.toString(), - inputDir.toString()); + "-Xms4m", + "-Xmx16m", // no need for a big heap, just need to read some files and execute + "-jar", + jarPath.toString(), + inputDir.toString() + ); logger.info("Launching reaper: " + String.join(" ", builder.command())); // be explicit for stdin, we use closing of the pipe to signal shutdown to the reaper builder.redirectInput(ProcessBuilder.Redirect.PIPE); @@ -146,12 +146,7 @@ private Path locateReaperJar() { if (matcher.matches()) { String path = matcher.group(1); - return Path.of( - OS.conditional() - .onWindows(() -> path.substring(1)) - .onUnix(() -> path) - .supply() - ); + return Path.of(OS.conditional().onWindows(() -> path.substring(1)).onUnix(() -> path).supply()); } else { throw new RuntimeException("Unable to locate " + REAPER_CLASS + " on build classpath."); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java index 52447b6bc20a1..db805b26cdca5 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java @@ -112,7 +112,7 @@ public boolean wait(int durationInMs) throws GeneralSecurityException, Interrupt ssl = null; } IOException failure = null; - for (; ; ) { + while (true) { try { checkResource(ssl); return true; @@ -161,11 +161,12 @@ private void configureSslContext(HttpURLConnection connection, SSLContext ssl) { private void configureBasicAuth(HttpURLConnection connection) { if (username != null) { if (password == null) { - throw new IllegalStateException("Basic Auth user [" + username - + "] has been set, but no password has been configured"); + throw new IllegalStateException("Basic Auth user [" + username + "] has been set, but no password has been configured"); } - connection.setRequestProperty("Authorization", - "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8))); + connection.setRequestProperty( + "Authorization", + "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8)) + ); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java index 07d68ca679f81..75ae735e8a9f1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java @@ -28,6 +28,7 @@ import java.nio.file.Files; import java.util.Arrays; import java.util.List; +import java.util.Locale; import static java.nio.charset.StandardCharsets.UTF_8; @@ -115,8 +116,14 @@ public RegularFileProperty getRuntimeVersionFile() { public void generate() { String javaVendorVersion = System.getProperty("java.vendor.version", 
System.getProperty("java.vendor")); String gradleJavaVersion = System.getProperty("java.version"); - String gradleJavaVersionDetails = javaVendorVersion + " " + gradleJavaVersion + " [" + System.getProperty("java.vm.name") - + " " + System.getProperty("java.vm.version") + "]"; + String gradleJavaVersionDetails = javaVendorVersion + + " " + + gradleJavaVersion + + " [" + + System.getProperty("java.vm.name") + + " " + + System.getProperty("java.vm.version") + + "]"; String compilerJavaVersionDetails = gradleJavaVersionDetails; JavaVersion compilerJavaVersionEnum = JavaVersion.current(); @@ -147,21 +154,24 @@ public void generate() { } try (BufferedWriter writer = new BufferedWriter(new FileWriter(outputFile.getAsFile().get()))) { + final String osName = System.getProperty("os.name"); + final String osVersion = System.getProperty("os.version"); + final String osArch = System.getProperty("os.arch"); + final JavaVersion parsedVersion = JavaVersion.toVersion(gradleJavaVersion); + writer.write(" Gradle Version : " + getProject().getGradle().getGradleVersion() + "\n"); - writer.write(" OS Info : " + System.getProperty("os.name") + " " + System.getProperty("os.version") - + " (" + System.getProperty("os.arch") + ")\n"); + writer.write(" OS Info : " + osName + " " + osVersion + " (" + osArch + ")\n"); + if (gradleJavaVersionDetails.equals(compilerJavaVersionDetails) == false || gradleJavaVersionDetails.equals(runtimeJavaVersionDetails) == false) { writer.write(" Compiler JDK Version : " + compilerJavaVersionEnum + " (" + compilerJavaVersionDetails + ")\n"); writer.write(" Compiler java.home : " + compilerJavaHome + "\n"); writer.write(" Runtime JDK Version : " + runtimeJavaVersionEnum + " (" + runtimeJavaVersionDetails + ")\n"); writer.write(" Runtime java.home : " + runtimeJavaHome + "\n"); - writer.write(" Gradle JDK Version : " + JavaVersion.toVersion(gradleJavaVersion) - + " (" + gradleJavaVersionDetails + ")\n"); + writer.write(" Gradle JDK Version : " + parsedVersion + " (" + gradleJavaVersionDetails + ")\n"); writer.write(" Gradle java.home : " + gradleJavaHome); } else { - writer.write(" JDK Version : " + JavaVersion.toVersion(gradleJavaVersion) - + " (" + gradleJavaVersionDetails + ")\n"); + writer.write(" JDK Version : " + parsedVersion + " (" + gradleJavaVersionDetails + ")\n"); writer.write(" JAVA_HOME : " + gradleJavaHome); } } catch (IOException e) { @@ -170,14 +180,24 @@ public void generate() { // enforce Java version if (compilerJavaVersionEnum.compareTo(minimumCompilerVersion) < 0) { - String message = "The compiler java.home must be set to a JDK installation directory for Java " + minimumCompilerVersion + - " but is [" + compilerJavaHome + "] corresponding to [" + compilerJavaVersionEnum + "]"; + String message = String.format( + Locale.ROOT, + "The compiler java.home must be set to a JDK installation directory for Java %s but is [%s] " + "corresponding to [%s]", + minimumCompilerVersion, + compilerJavaHome, + compilerJavaVersionEnum + ); throw new GradleException(message); } if (runtimeJavaVersionEnum.compareTo(minimumRuntimeVersion) < 0) { - String message = "The runtime java.home must be set to a JDK installation directory for Java " + minimumRuntimeVersion + - " but is [" + runtimeJavaHome + "] corresponding to [" + runtimeJavaVersionEnum + "]"; + String message = String.format( + Locale.ROOT, + "The runtime java.home must be set to a JDK installation directory for Java %s but is [%s] " + "corresponding to [%s]", + minimumRuntimeVersion, + runtimeJavaHome, + 
runtimeJavaVersionEnum + ); throw new GradleException(message); } @@ -195,8 +215,15 @@ public void generate() { expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version)); } if (javaVersionEnum != expectedJavaVersionEnum) { - String message = "The environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java " + - expectedJavaVersionEnum + " but is [" + javaHome + "] corresponding to [" + javaVersionEnum + "]"; + String message = String.format( + Locale.ROOT, + "The environment variable JAVA%d_HOME must be set to a JDK installation directory for Java" + + " %s but is [%s] corresponding to [%s]", + version, + expectedJavaVersionEnum, + javaHome, + javaVersionEnum + ); throw new GradleException(message); } } @@ -217,11 +244,11 @@ private void writeToFile(File file, String content) { * Finds printable java version of the given JAVA_HOME */ private String findJavaVersionDetails(File javaHome) { - String versionInfoScript = "print(" + - "java.lang.System.getProperty(\"java.vendor.version\", java.lang.System.getProperty(\"java.vendor\")) + \" \" + " + - "java.lang.System.getProperty(\"java.version\") + \" [\" + " + - "java.lang.System.getProperty(\"java.vm.name\") + \" \" + " + - "java.lang.System.getProperty(\"java.vm.version\") + \"]\");"; + String versionInfoScript = "print(" + + "java.lang.System.getProperty(\"java.vendor.version\", java.lang.System.getProperty(\"java.vendor\")) + \" \" + " + + "java.lang.System.getProperty(\"java.version\") + \" [\" + " + + "java.lang.System.getProperty(\"java.vm.name\") + \" \" + " + + "java.lang.System.getProperty(\"java.vm.version\") + \"]\");"; return runJavaAsScript(javaHome, versionInfoScript).trim(); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java index 7c3c4ab8e8083..dbe13916a2858 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java @@ -66,8 +66,8 @@ public void apply(Project project) { } } - GenerateGlobalBuildInfoTask generateTask = project.getTasks().create("generateGlobalBuildInfo", - GenerateGlobalBuildInfoTask.class, task -> { + GenerateGlobalBuildInfoTask generateTask = project.getTasks() + .create("generateGlobalBuildInfo", GenerateGlobalBuildInfoTask.class, task -> { task.setJavaVersions(javaVersions); task.setMinimumCompilerVersion(minimumCompilerVersion); task.setMinimumRuntimeVersion(minimumRuntimeVersion); @@ -104,14 +104,16 @@ public void apply(Project project) { params.setInFipsJvm(isInFipsJvm()); }); - project.allprojects(p -> { - // Make sure than any task execution generates and prints build info - p.getTasks().configureEach(task -> { - if (task != generateTask && task != printTask) { - task.dependsOn(printTask); - } - }); - }); + project.allprojects( + p -> { + // Make sure than any task execution generates and prints build info + p.getTasks().configureEach(task -> { + if (task != generateTask && task != printTask) { + task.dependsOn(printTask); + } + }); + } + ); } private static File findCompilerJavaHome() { @@ -139,11 +141,16 @@ private static File findRuntimeJavaHome(final File compilerJavaHome) { private static String findJavaHome(String version) { String versionedJavaHome = System.getenv(getJavaHomeEnvVarName(version)); if (versionedJavaHome == null) { - throw new GradleException( - "$" + getJavaHomeEnvVarName(version) + " 
must be set to build Elasticsearch. " + - "Note that if the variable was just set you might have to run `./gradlew --stop` for " + - "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details." + final String exceptionMessage = String.format( + Locale.ROOT, + "$%s must be set to build Elasticsearch. " + + "Note that if the variable was just set you " + + "might have to run `./gradlew --stop` for " + + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details.", + getJavaHomeEnvVarName(version) ); + + throw new GradleException(exceptionMessage); } return versionedJavaHome; } @@ -157,9 +164,9 @@ private static boolean isInFipsJvm() { } private static String getResourceContents(String resourcePath) { - try (BufferedReader reader = new BufferedReader( - new InputStreamReader(GlobalBuildInfoPlugin.class.getResourceAsStream(resourcePath)) - )) { + try ( + BufferedReader reader = new BufferedReader(new InputStreamReader(GlobalBuildInfoPlugin.class.getResourceAsStream(resourcePath))) + ) { StringBuilder b = new StringBuilder(); for (String line = reader.readLine(); line != null; line = reader.readLine()) { if (b.length() != 0) { @@ -194,7 +201,7 @@ private static int findDefaultParallel(Project project) { if (name.equals("physical id")) { currentID = value; } - // Number of cores not including hyper-threading + // Number of cores not including hyper-threading if (name.equals("cpu cores")) { assert currentID.isEmpty() == false; socketToCore.put("currentID", Integer.valueOf(value)); @@ -298,9 +305,7 @@ public static String gitRevision(File rootDir) { private static String readFirstLine(final Path path) throws IOException { String firstLine; try (Stream lines = Files.lines(path, StandardCharsets.UTF_8)) { - firstLine = lines - .findFirst() - .orElseThrow(() -> new IOException("file [" + path + "] is empty")); + firstLine = lines.findFirst().orElseThrow(() -> new IOException("file [" + path + "] is empty")); } return firstLine; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/DependencyLicensesTask.java index 07f13ab338c4b..e6d147b8ee606 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/DependencyLicensesTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/DependencyLicensesTask.java @@ -44,6 +44,7 @@ import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; @@ -209,7 +210,8 @@ private void failIfAnyMissing(String item, Boolean exists, String type) { } private void checkDependencies(Map licenses, Map notices, Set shaFiles) - throws NoSuchAlgorithmException, IOException { + throws NoSuchAlgorithmException, + IOException { for (File dependency : dependencies) { String jarName = dependency.getName(); String depName = regex.matcher(jarName).replaceFirst(""); @@ -223,8 +225,8 @@ private void checkDependencies(Map licenses, Map shaFiles, File dependency, String jarName, String depName) - throws NoSuchAlgorithmException, IOException { + private void validateSha(Set shaFiles, File dependency, String jarName, String depName) throws NoSuchAlgorithmException, + IOException { if (ignoreShas.contains(depName)) { // local deps should not have sha files! 
if (getShaFile(jarName).exists()) { @@ -269,10 +271,18 @@ private void checkSha(File jar, String jarName, Set shaFiles) throws NoSuc String sha = getSha1(jar); if (expectedSha.equals(sha) == false) { - throw new GradleException( - "SHA has changed! Expected " + expectedSha + " for " + jarName + " but got " + sha + ". " + - "\nThis usually indicates a corrupt dependency cache or artifacts changed upstream." + - "\nEither wipe your cache, fix the upstream artifact, or delete " + shaFile + " and run updateShas"); + final String exceptionMessage = String.format( + Locale.ROOT, + "SHA has changed! Expected %s for %s but got %s." + + "\nThis usually indicates a corrupt dependency cache or artifacts changed upstream." + + "\nEither wipe your cache, fix the upstream artifact, or delete %s and run updateShas", + expectedSha, + jarName, + sha, + shaFile + ); + + throw new GradleException(exceptionMessage); } shaFiles.remove(shaFile); } @@ -314,13 +324,11 @@ Set getShaFiles() { throw new GradleException("\"" + licensesDir.getPath() + "\" isn't a valid directory"); } - return Arrays.stream(array) - .filter(file -> file.getName().endsWith(SHA_EXTENSION)) - .collect(Collectors.toSet()); + return Arrays.stream(array).filter(file -> file.getName().endsWith(SHA_EXTENSION)).collect(Collectors.toSet()); } String getSha1(File file) throws IOException, NoSuchAlgorithmException { - byte[] bytes = Files.readAllBytes(file.toPath()); + byte[] bytes = Files.readAllBytes(file.toPath()); MessageDigest digest = MessageDigest.getInstance("SHA-1"); char[] encoded = Hex.encodeHex(digest.digest(bytes)); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java index 4b079384be5c5..68839f86c1035 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java @@ -50,10 +50,10 @@ public class FilePermissionsTask extends DefaultTask { * A pattern set of which files should be checked. 
*/ private final PatternFilterable filesFilter = new PatternSet() - // we always include all source files, and exclude what should not be checked - .include("**") - // exclude sh files that might have the executable bit set - .exclude("**/*.sh"); + // we always include all source files, and exclude what should not be checked + .include("**") + // exclude sh files that might have the executable bit set + .exclude("**/*.sh"); private File outputMarker = new File(getProject().getBuildDir(), "markers/filePermissions"); @@ -64,11 +64,11 @@ public FilePermissionsTask() { private static boolean isExecutableFile(File file) { try { Set permissions = Files.getFileAttributeView(file.toPath(), PosixFileAttributeView.class) - .readAttributes() - .permissions(); + .readAttributes() + .permissions(); return permissions.contains(PosixFilePermission.OTHERS_EXECUTE) - || permissions.contains(PosixFilePermission.OWNER_EXECUTE) - || permissions.contains(PosixFilePermission.GROUP_EXECUTE); + || permissions.contains(PosixFilePermission.OWNER_EXECUTE) + || permissions.contains(PosixFilePermission.GROUP_EXECUTE); } catch (IOException e) { throw new IllegalStateException("unable to read the file " + file + " attributes", e); } @@ -80,10 +80,11 @@ private static boolean isExecutableFile(File file) { @InputFiles @SkipWhenEmpty public FileCollection getFiles() { - return Boilerplate.getJavaSourceSets(getProject()).stream() - .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) - .reduce(FileTree::plus) - .orElse(getProject().files().getAsFileTree()); + return Boilerplate.getJavaSourceSets(getProject()) + .stream() + .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) + .reduce(FileTree::plus) + .orElse(getProject().files().getAsFileTree()); } @TaskAction @@ -91,10 +92,11 @@ public void checkInvalidPermissions() throws IOException { if (Os.isFamily(Os.FAMILY_WINDOWS)) { throw new StopExecutionException(); } - List failures = getFiles().getFiles().stream() - .filter(FilePermissionsTask::isExecutableFile) - .map(file -> "Source file is executable: " + file) - .collect(Collectors.toList()); + List failures = getFiles().getFiles() + .stream() + .filter(FilePermissionsTask::isExecutableFile) + .map(file -> "Source file is executable: " + file) + .collect(Collectors.toList()); if (!failures.isEmpty()) { throw new GradleException("Found invalid file permissions:\n" + String.join("\n", failures)); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java index 123aa397da856..2bf9e56d3afbd 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java @@ -88,7 +88,9 @@ public ForbiddenPatternsTask() { @InputFiles @SkipWhenEmpty public FileCollection getFiles() { - return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets() + return getProject().getConvention() + .getPlugin(JavaPluginConvention.class) + .getSourceSets() .stream() .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) .reduce(FileTree::plus) @@ -101,8 +103,8 @@ public void checkInvalidPatterns() throws IOException { List failures = new ArrayList<>(); for (File f : getFiles()) { List lines; - try(Stream stream = Files.lines(f.toPath(), StandardCharsets.UTF_8)) { - lines = stream.collect(Collectors.toList()); + try (Stream stream = 
Files.lines(f.toPath(), StandardCharsets.UTF_8)) { + lines = stream.collect(Collectors.toList()); } catch (UncheckedIOException e) { throw new IllegalArgumentException("Failed to read " + f + " as UTF_8", e); } @@ -112,13 +114,17 @@ public void checkInvalidPatterns() throws IOException { .collect(Collectors.toList()); String path = getProject().getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString(); - failures.addAll(invalidLines.stream() - .map(l -> new AbstractMap.SimpleEntry<>(l+1, lines.get(l))) - .flatMap(kv -> patterns.entrySet().stream() - .filter(p -> Pattern.compile(p.getValue()).matcher(kv.getValue()).find()) - .map(p -> "- " + p.getKey() + " on line " + kv.getKey() + " of " + path) - ) - .collect(Collectors.toList())); + failures.addAll( + invalidLines.stream() + .map(l -> new AbstractMap.SimpleEntry<>(l + 1, lines.get(l))) + .flatMap( + kv -> patterns.entrySet() + .stream() + .filter(p -> Pattern.compile(p.getValue()).matcher(kv.getValue()).find()) + .map(p -> "- " + p.getKey() + " on line " + kv.getKey() + " of " + path) + ) + .collect(Collectors.toList()) + ); } if (failures.isEmpty() == false) { throw new GradleException("Found invalid patterns:\n" + String.join("\n", failures)); @@ -143,7 +149,7 @@ public void exclude(String... excludes) { filesFilter.exclude(excludes); } - public void rule(Map props) { + public void rule(Map props) { String name = props.remove("name"); if (name == null) { throw new InvalidUserDataException("Missing [name] for invalid pattern rule"); @@ -153,8 +159,7 @@ public void rule(Map props) { throw new InvalidUserDataException("Missing [pattern] for invalid pattern rule"); } if (props.isEmpty() == false) { - throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: " - + props.keySet().toString()); + throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: " + props.keySet().toString()); } // TODO: fail if pattern contains a newline, it won't work (currently) patterns.put(name, pattern); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java index a730e069d5a7f..0cb01d764b28f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java @@ -67,10 +67,15 @@ public void setClasspath(FileCollection classpath) { @PathSensitive(PathSensitivity.RELATIVE) @SkipWhenEmpty public FileCollection getClassDirectories() { - return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().stream() + return getProject().getConvention() + .getPlugin(JavaPluginConvention.class) + .getSourceSets() + .stream() // Don't pick up all source sets like the java9 ones as logger-check doesn't support the class format - .filter(sourceSet -> sourceSet.getName().equals(SourceSet.MAIN_SOURCE_SET_NAME) - || sourceSet.getName().equals(SourceSet.TEST_SOURCE_SET_NAME)) + .filter( + sourceSet -> sourceSet.getName().equals(SourceSet.MAIN_SOURCE_SET_NAME) + || sourceSet.getName().equals(SourceSet.TEST_SOURCE_SET_NAME) + ) .map(sourceSet -> sourceSet.getOutput().getClassesDirs()) .reduce(FileCollection::plus) .orElse(getProject().files()) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java index 5a29c4a4a3570..9e2ac08a8ddea 100644 --- 
a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java @@ -36,7 +36,7 @@ public File getSuccessMarker() { @TaskAction public void writeMarker() throws IOException { - Files.write(getSuccessMarker().toPath(), new byte[]{}, StandardOpenOption.CREATE); + Files.write(getSuccessMarker().toPath(), new byte[] {}, StandardOpenOption.CREATE); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java index 5fec05d945ed5..32838fd243d6f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionRule.java @@ -67,6 +67,7 @@ public void setBaseClasses(Collection baseClasses) { public void taskName(Pattern expression) { taskNames.add(expression); } + public void taskName(String expression) { taskNames.add(Pattern.compile(expression)); } @@ -86,8 +87,10 @@ public Set getTaskNames() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; TestingConventionRule that = (TestingConventionRule) o; return Objects.equals(suffix, that.suffix); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java index 2f5028315a425..5b16ca7d40f1e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java @@ -71,19 +71,22 @@ public TestingConventionsTasks() { @Input public Map> getClassFilesPerEnabledTask() { - return getProject().getTasks().withType(Test.class).stream() + return getProject().getTasks() + .withType(Test.class) + .stream() .filter(Task::getEnabled) - .collect(Collectors.toMap( - Task::getPath, - task -> task.getCandidateClassFiles().getFiles() - )); + .collect(Collectors.toMap(Task::getPath, task -> task.getCandidateClassFiles().getFiles())); } @Input public Map getTestClassNames() { if (testClassNames == null) { - testClassNames = Boilerplate.getJavaSourceSets(getProject()).getByName("test").getOutput().getClassesDirs() - .getFiles().stream() + testClassNames = Boilerplate.getJavaSourceSets(getProject()) + .getByName("test") + .getOutput() + .getClassesDirs() + .getFiles() + .stream() .filter(File::exists) .flatMap(testRoot -> walkPathAndLoadClasses(testRoot).entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); @@ -113,8 +116,11 @@ public Set getMainClassNamedLikeTests() { return Collections.emptySet(); } return javaSourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME) - .getOutput().getClassesDirs().getAsFileTree() - .getFiles().stream() + .getOutput() + .getClassesDirs() + .getAsFileTree() + .getFiles() + .stream() .filter(file -> file.getName().endsWith(".class")) .map(File::getName) .map(name -> name.substring(0, name.length() - 6)) @@ -126,21 +132,22 @@ public Set getMainClassNamedLikeTests() { public void doCheck() throws IOException { final String problems; - try (URLClassLoader isolatedClassLoader = new URLClassLoader( - 
getTestsClassPath().getFiles().stream().map(this::fileToUrl).toArray(URL[]::new) - )) { + try ( + URLClassLoader isolatedClassLoader = new URLClassLoader( + getTestsClassPath().getFiles().stream().map(this::fileToUrl).toArray(URL[]::new) + ) + ) { Predicate> isStaticClass = clazz -> Modifier.isStatic(clazz.getModifiers()); Predicate> isPublicClass = clazz -> Modifier.isPublic(clazz.getModifiers()); Predicate> isAbstractClass = clazz -> Modifier.isAbstract(clazz.getModifiers()); - final Map> classes = getTestClassNames().entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getValue, - entry -> loadClassWithoutInitializing(entry.getKey(), isolatedClassLoader)) - ); + final Map> classes = getTestClassNames().entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getValue, entry -> loadClassWithoutInitializing(entry.getKey(), isolatedClassLoader))); final FileTree allTestClassFiles = getProject().files( - classes.values().stream() + classes.values() + .stream() .filter(isStaticClass.negate()) .filter(isPublicClass) .filter((Predicate>) this::implementsNamingConvention) @@ -150,11 +157,13 @@ public void doCheck() throws IOException { final Map> classFilesPerTask = getClassFilesPerEnabledTask(); - final Map>> testClassesPerTask = classFilesPerTask.entrySet().stream() + final Map>> testClassesPerTask = classFilesPerTask.entrySet() + .stream() .collect( Collectors.toMap( Map.Entry::getKey, - entry -> entry.getValue().stream() + entry -> entry.getValue() + .stream() .map(classes::get) .filter(this::implementsNamingConvention) .collect(Collectors.toSet()) @@ -172,23 +181,27 @@ public void doCheck() throws IOException { .collect( Collectors.toMap( TestingConventionRule::getSuffix, - rule -> rule.getBaseClasses().stream() + rule -> rule.getBaseClasses() + .stream() .map(each -> loadClassWithoutInitializing(each, isolatedClassLoader)) .collect(Collectors.toSet()) - )); + ) + ); } problems = collectProblems( checkNoneExists( "Test classes implemented by inner classes will not run", - classes.values().stream() + classes.values() + .stream() .filter(isStaticClass) .filter(isPublicClass) .filter(((Predicate>) this::implementsNamingConvention).or(this::seemsLikeATest)) ), checkNoneExists( "Seem like test classes but don't match naming convention", - classes.values().stream() + classes.values() + .stream() .filter(isStaticClass.negate()) .filter(isPublicClass) .filter(isAbstractClass.negate()) @@ -199,54 +212,42 @@ public void doCheck() throws IOException { // TODO: check for abstract classes that implement the naming conventions // No empty enabled tasks collectProblems( - testClassesPerTask.entrySet().stream() - .map(entry -> - checkAtLeastOneExists( - "test class included in task " + entry.getKey(), - entry.getValue().stream() - ) - ) + testClassesPerTask.entrySet() + .stream() + .map(entry -> checkAtLeastOneExists("test class included in task " + entry.getKey(), entry.getValue().stream())) .sorted() .collect(Collectors.joining("\n")) ), checkNoneExists( - "Test classes are not included in any enabled task (" + - classFilesPerTask.keySet().stream() - .collect(Collectors.joining(",")) + ")", - allTestClassFiles.getFiles().stream() - .filter(testFile -> - classFilesPerTask.values().stream() - .anyMatch(fileSet -> fileSet.contains(testFile)) == false - ) + "Test classes are not included in any enabled task (" + + classFilesPerTask.keySet().stream().collect(Collectors.joining(",")) + + ")", + allTestClassFiles.getFiles() + .stream() + .filter(testFile -> 
classFilesPerTask.values().stream().anyMatch(fileSet -> fileSet.contains(testFile)) == false) .map(classes::get) ), - collectProblems( - suffixToBaseClass.entrySet().stream() - .filter(entry -> entry.getValue().isEmpty() == false) - .map(entry -> { - return checkNoneExists( - "Tests classes with suffix `" + entry.getKey() + "` should extend " + - entry.getValue().stream().map(Class::getName).collect(Collectors.joining(" or ")) + - " but the following classes do not", - classes.values().stream() - .filter(clazz -> clazz.getName().endsWith(entry.getKey())) - .filter(clazz -> entry.getValue().stream() - .anyMatch(test -> test.isAssignableFrom(clazz)) == false) - ); - }).sorted() - .collect(Collectors.joining("\n")) - ), + collectProblems(suffixToBaseClass.entrySet().stream().filter(entry -> entry.getValue().isEmpty() == false).map(entry -> { + return checkNoneExists( + "Tests classes with suffix `" + + entry.getKey() + + "` should extend " + + entry.getValue().stream().map(Class::getName).collect(Collectors.joining(" or ")) + + " but the following classes do not", + classes.values() + .stream() + .filter(clazz -> clazz.getName().endsWith(entry.getKey())) + .filter(clazz -> entry.getValue().stream().anyMatch(test -> test.isAssignableFrom(clazz)) == false) + ); + }).sorted().collect(Collectors.joining("\n"))), // TODO: check that the testing tasks are included in the right task based on the name ( from the rule ) - checkNoneExists( - "Classes matching the test naming convention should be in test not main", - getMainClassNamedLikeTests() - ) + checkNoneExists("Classes matching the test naming convention should be in test not main", getMainClassNamedLikeTests()) ); } if (problems.isEmpty()) { getSuccessMarker().getParentFile().mkdirs(); - Files.write(getSuccessMarker().toPath(), new byte[]{}, StandardOpenOption.CREATE); + Files.write(getSuccessMarker().toPath(), new byte[] {}, StandardOpenOption.CREATE); } else { getLogger().error(problems); throw new IllegalStateException("Testing conventions are not honored"); @@ -254,17 +255,11 @@ public void doCheck() throws IOException { } private String collectProblems(String... 
problems) { - return Stream.of(problems) - .map(String::trim) - .filter(s -> s.isEmpty() == false) - .collect(Collectors.joining("\n")); + return Stream.of(problems).map(String::trim).filter(s -> s.isEmpty() == false).collect(Collectors.joining("\n")); } private String checkNoneExists(String message, Stream> stream) { - String problem = stream - .map(each -> " * " + each.getName()) - .sorted() - .collect(Collectors.joining("\n")); + String problem = stream.map(each -> " * " + each.getName()).sorted().collect(Collectors.joining("\n")); if (problem.isEmpty() == false) { return message + ":\n" + problem; } else { @@ -273,10 +268,7 @@ private String checkNoneExists(String message, Stream> stream } private String checkNoneExists(String message, Set candidates) { - String problem = candidates.stream() - .map(each -> " * " + each) - .sorted() - .collect(Collectors.joining("\n")); + String problem = candidates.stream().map(each -> " * " + each).sorted().collect(Collectors.joining("\n")); if (problem.isEmpty() == false) { return message + ":\n" + problem; } else { @@ -309,8 +301,12 @@ private boolean seemsLikeATest(Class clazz) { return true; } if (isAnnotated(method, junitAnnotation)) { - getLogger().debug("{} is a test because it has method '{}' annotated with '{}'", - clazz.getName(), method.getName(), junitAnnotation.getName()); + getLogger().debug( + "{} is a test because it has method '{}' annotated with '{}'", + clazz.getName(), + method.getName(), + junitAnnotation.getName() + ); return true; } } @@ -318,9 +314,7 @@ private boolean seemsLikeATest(Class clazz) { return false; } catch (NoClassDefFoundError e) { // Include the message to get more info to get more a more useful message when running Gradle without -s - throw new IllegalStateException( - "Failed to inspect class " + clazz.getName() + ". Missing class? " + e.getMessage(), - e); + throw new IllegalStateException("Failed to inspect class " + clazz.getName() + ". Missing class? " + e.getMessage(), e); } } @@ -329,9 +323,7 @@ private boolean implementsNamingConvention(Class clazz) { } private boolean implementsNamingConvention(String className) { - if (naming.stream() - .map(TestingConventionRule::getSuffix) - .anyMatch(suffix -> className.endsWith(suffix))) { + if (naming.stream().map(TestingConventionRule::getSuffix).anyMatch(suffix -> className.endsWith(suffix))) { getLogger().debug("{} is a test because it matches the naming convention", className); return true; } @@ -339,9 +331,7 @@ private boolean implementsNamingConvention(String className) { } private boolean matchesTestMethodNamingConvention(Method method) { - return method.getName().startsWith(TEST_METHOD_PREFIX) && - Modifier.isStatic(method.getModifiers()) == false - ; + return method.getName().startsWith(TEST_METHOD_PREFIX) && Modifier.isStatic(method.getModifiers()) == false; } private boolean isAnnotated(Method method, Class annotation) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 4ed1543df1b55..b97ff3cb214a1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -69,9 +69,7 @@ public class ThirdPartyAuditTask extends DefaultTask { "WARNING: Class '(.*)' cannot be loaded \\(.*\\)\\. Please fix the classpath!" 
); - private static final Pattern VIOLATION_PATTERN = Pattern.compile( - "\\s\\sin ([a-zA-Z0-9$.]+) \\(.*\\)" - ); + private static final Pattern VIOLATION_PATTERN = Pattern.compile("\\s\\sin ([a-zA-Z0-9$.]+) \\(.*\\)"); private static final int SIG_KILL_EXIT_VALUE = 137; private static final List EXPECTED_EXIT_CODES = Arrays.asList( CliMain.EXIT_SUCCESS, @@ -124,10 +122,7 @@ public void setJavaHome(String javaHome) { @Internal public File getJarExpandDir() { - return new File( - new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), - getName() - ); + return new File(new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), getName()); } @OutputFile @@ -154,7 +149,7 @@ public void ignoreViolations(String... violatingClasses) { } } - public void ignoreJarHellWithJDK(String ...classes) { + public void ignoreJarHellWithJDK(String... classes) { for (String each : classes) { jdkJarHellExcludes.add(each); } @@ -174,16 +169,15 @@ public Set getMissingClassExcludes() { @Classpath @SkipWhenEmpty public Set getJarsToScan() { - // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, + // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, // or dependencies added as `files(...)`, we can't be sure if those are third party or not. // err on the side of scanning these to make sure we don't miss anything - Spec reallyThirdParty = dep -> dep.getGroup() != null && - dep.getGroup().startsWith("org.elasticsearch") == false; - Set jars = getRuntimeConfiguration() + Spec reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.elasticsearch") == false; + Set jars = getRuntimeConfiguration().getResolvedConfiguration().getFiles(reallyThirdParty); + Set compileOnlyConfiguration = getProject().getConfigurations() + .getByName("compileOnly") .getResolvedConfiguration() .getFiles(reallyThirdParty); - Set compileOnlyConfiguration = getProject().getConfigurations().getByName("compileOnly").getResolvedConfiguration() - .getFiles(reallyThirdParty); // don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin if (compileOnlyConfiguration != null) { jars.removeAll(compileOnlyConfiguration); @@ -221,8 +215,7 @@ public void runThirdPartyAudit() throws IOException { if (bogousExcludesCount != 0 && bogousExcludesCount == missingClassExcludes.size() + violationsExcludes.size()) { logForbiddenAPIsOutput(forbiddenApisOutput); throw new IllegalStateException( - "All excluded classes seem to have no issues. " + - "This is sometimes an indication that the check silently failed" + "All excluded classes seem to have no issues. 
This is sometimes an indication that the check silently failed" ); } assertNoPointlessExclusions("are not missing", missingClassExcludes, missingClasses); @@ -232,10 +225,7 @@ public void runThirdPartyAudit() throws IOException { assertNoPointlessExclusions("do not generate jar hell with the JDK", jdkJarHellExcludes, jdkJarHellClasses); if (missingClassExcludes == null && (missingClasses.isEmpty() == false)) { - getLogger().info( - "Found missing classes, but task is configured to ignore all of them:\n {}", - formatClassList(missingClasses) - ); + getLogger().info("Found missing classes, but task is configured to ignore all of them:\n {}", formatClassList(missingClasses)); missingClasses.clear(); } @@ -247,7 +237,7 @@ public void runThirdPartyAudit() throws IOException { if (missingClasses.isEmpty() == false) { getLogger().error("Missing classes:\n{}", formatClassList(missingClasses)); } - if(violationsClasses.isEmpty() == false) { + if (violationsClasses.isEmpty() == false) { getLogger().error("Classes with violations:\n{}", formatClassList(violationsClasses)); } throw new IllegalStateException("Audit of third party dependencies failed"); @@ -257,7 +247,7 @@ public void runThirdPartyAudit() throws IOException { // Mark successful third party audit check getSuccessMarker().getParentFile().mkdirs(); - Files.write(getSuccessMarker().toPath(), new byte[]{}); + Files.write(getSuccessMarker().toPath(), new byte[] {}); } private void logForbiddenAPIsOutput(String forbiddenApisOutput) { @@ -310,8 +300,7 @@ private void assertNoJarHell(Set jdkJarHellClasses) { jdkJarHellClasses.removeAll(jdkJarHellExcludes); if (jdkJarHellClasses.isEmpty() == false) { throw new IllegalStateException( - "Audit of third party dependencies failed:\n" + - " Jar Hell with the JDK:\n" + formatClassList(jdkJarHellClasses) + "Audit of third party dependencies failed:\n Jar Hell with the JDK:\n" + formatClassList(jdkJarHellClasses) ); } } @@ -328,10 +317,7 @@ private void assertNoPointlessExclusions(String specifics, Set excludes, } private String formatClassList(Set classList) { - return classList.stream() - .map(name -> " * " + name) - .sorted() - .collect(Collectors.joining("\n")); + return classList.stream().map(name -> " * " + name).sorted().collect(Collectors.joining("\n")); } private String runForbiddenAPIsCli() throws IOException { @@ -347,11 +333,7 @@ private String runForbiddenAPIsCli() throws IOException { ); spec.jvmArgs("-Xmx1g"); spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain"); - spec.args( - "-f", getSignatureFile().getAbsolutePath(), - "-d", getJarExpandDir(), - "--allowmissingclasses" - ); + spec.args("-f", getSignatureFile().getAbsolutePath(), "-d", getJarExpandDir(), "--allowmissingclasses"); spec.setErrorOutput(errorOut); if (getLogger().isInfoEnabled() == false) { spec.setStandardOutput(new NullOutputStream()); @@ -359,9 +341,7 @@ spec.setIgnoreExitValue(true); }); if (OS.current().equals(OS.LINUX) && result.getExitValue() == SIG_KILL_EXIT_VALUE) { - throw new IllegalStateException( - "Third party audit was killed buy SIGKILL, could be a victim of the Linux OOM killer" - ); + throw new IllegalStateException("Third party audit was killed by SIGKILL, could be a victim of the Linux OOM killer"); } final String forbiddenApisOutput; try (ByteArrayOutputStream outputStream = errorOut) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java 
b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java index f76289dc591b3..e92221f1f869e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -79,7 +79,8 @@ private static class SymbolicLinkPreservingTarCopyAction implements CopyAction { SymbolicLinkPreservingTarCopyAction( final Provider tarFile, final ArchiveOutputStreamFactory compressor, - final boolean isPreserveFileTimestamps) { + final boolean isPreserveFileTimestamps + ) { this.tarFile = tarFile; this.compressor = compressor; this.isPreserveFileTimestamps = isPreserveFileTimestamps; @@ -87,8 +88,10 @@ private static class SymbolicLinkPreservingTarCopyAction implements CopyAction { @Override public WorkResult execute(final CopyActionProcessingStream stream) { - try (OutputStream out = compressor.createArchiveOutputStream(tarFile.get().getAsFile()); - TarArchiveOutputStream tar = new TarArchiveOutputStream(out)) { + try ( + OutputStream out = compressor.createArchiveOutputStream(tarFile.get().getAsFile()); + TarArchiveOutputStream tar = new TarArchiveOutputStream(out) + ) { tar.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); stream.process(new SymbolicLinkPreservingTarStreamAction(tar)); } catch (final IOException e) { @@ -136,7 +139,8 @@ private boolean isChildOfVisitedSymbolicLink(final FileCopyDetailsInternal detai return false; } for (final File symbolicLink : visitedSymbolicLinks) { - if (isChildOf(symbolicLink, file)) return true; + if (isChildOf(symbolicLink, file)) + return true; } return false; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java index 8b65bfc553fd9..7b35531cb1efd 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java @@ -95,8 +95,11 @@ public SymbolicLinkPreservingUntarTask(final ObjectFactory objectFactory) { final void execute() { // ensure the target extraction path is empty getProject().delete(extractPath); - try (TarArchiveInputStream tar = - new TarArchiveInputStream(new GzipCompressorInputStream(new FileInputStream(tarFile.getAsFile().get())))) { + try ( + TarArchiveInputStream tar = new TarArchiveInputStream( + new GzipCompressorInputStream(new FileInputStream(tarFile.getAsFile().get())) + ) + ) { final Path destinationPath = extractPath.get().getAsFile().toPath(); TarArchiveEntry entry = tar.getNextTarEntry(); while (entry != null) { @@ -127,9 +130,10 @@ final void execute() { final PosixFileAttributeView view = Files.getFileAttributeView(destination, PosixFileAttributeView.class); if (view != null) { final Set permissions = PosixFilePermissions.fromString( - permissions((entry.getMode() >> 6) & 07) + - permissions((entry.getMode() >> 3) & 07) + - permissions((entry.getMode() >> 0) & 07)); + permissions((entry.getMode() >> 6) & 07) + permissions((entry.getMode() >> 3) & 07) + permissions( + (entry.getMode() >> 0) & 07 + ) + ); Files.setPosixFilePermissions(destination, permissions); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/BatsTestTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/BatsTestTask.java index 8b5d106b6ab8b..8dbac3f7c19fd 100644 --- 
a/buildSrc/src/main/java/org/elasticsearch/gradle/test/BatsTestTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/BatsTestTask.java @@ -110,9 +110,9 @@ public void runBats() { List command = new ArrayList<>(); command.add("bats"); command.add("--tap"); - command.addAll(testsDir.getAsFileTree().getFiles().stream() - .filter(f -> f.getName().endsWith(".bats")) - .sorted().collect(Collectors.toList())); + command.addAll( + testsDir.getAsFileTree().getFiles().stream().filter(f -> f.getName().endsWith(".bats")).sorted().collect(Collectors.toList()) + ); getProject().exec(spec -> { spec.setWorkingDir(distributionsDir.getAsFile()); spec.environment(System.getenv()); diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/DistroTestPlugin.java similarity index 78% rename from buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/test/DistroTestPlugin.java index b62ff129c8638..28aac0f558412 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -19,7 +19,6 @@ package org.elasticsearch.gradle.test; -import org.elasticsearch.gradle.BuildPlugin; import org.elasticsearch.gradle.BwcVersions; import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; @@ -93,7 +92,7 @@ public void apply(Project project) { final boolean runDockerTests = shouldRunDockerTests(project); project.getPluginManager().apply(DistributionDownloadPlugin.class); - project.getPluginManager().apply(BuildPlugin.class); + project.getPluginManager().apply("elasticsearch.build"); // TODO: it would be useful to also have the SYSTEM_JAVA_HOME setup in the root project, so that running from GCP only needs // a java for gradle to run, and the tests are self sufficient and consistent with the java they use @@ -117,12 +116,12 @@ public void apply(Project project) { } Map> batsTests = new HashMap<>(); batsTests.put("bats oss", configureBatsTest(project, "oss", distributionsDir, copyDistributionsTask)); - batsTests.put("bats default", configureBatsTest(project, "default", distributionsDir, copyDistributionsTask)); - configureBatsTest(project, "plugins",distributionsDir, copyDistributionsTask, copyPluginsTask).configure(t -> - t.setPluginsDir(pluginsDir) + configureBatsTest(project, "plugins", distributionsDir, copyDistributionsTask, copyPluginsTask).configure( + t -> t.setPluginsDir(pluginsDir) + ); + configureBatsTest(project, "upgrade", distributionsDir, copyDistributionsTask, copyUpgradeTask).configure( + t -> t.setUpgradeDir(upgradeDir) ); - configureBatsTest(project, "upgrade", distributionsDir, copyDistributionsTask, copyUpgradeTask).configure(t -> - t.setUpgradeDir(upgradeDir)); project.subprojects(vmProject -> { vmProject.getPluginManager().apply(VagrantBasePlugin.class); @@ -136,8 +135,12 @@ public void apply(Project project) { Platform platform = distribution.getPlatform(); // this condition ensures windows boxes get windows distributions, and linux boxes get linux distributions if (isWindows(vmProject) == (platform == Platform.WINDOWS)) { - TaskProvider vmTask = - configureVMWrapperTask(vmProject, distribution.getName() + " distribution", destructiveTaskName, vmDependencies); + TaskProvider vmTask = configureVMWrapperTask( + vmProject, + distribution.getName() + " 
distribution", + destructiveTaskName, + vmDependencies + ); vmTask.configure(t -> t.dependsOn(distribution)); distroTest.configure(t -> { @@ -170,7 +173,12 @@ public void apply(Project project) { } private static Jdk createJdk( - NamedDomainObjectContainer jdksContainer, String name, String vendor, String version, String platform) { + NamedDomainObjectContainer jdksContainer, + String name, + String vendor, + String version, + String platform + ) { Jdk jdk = jdksContainer.create(name); jdk.setVendor(vendor); jdk.setVersion(version); @@ -216,17 +224,14 @@ private static List configureVM(Project project) { vagrant.vmEnv("PATH", convertPath(project, vagrant, gradleJdk, "/bin:$PATH", "\\bin;$Env:PATH")); // pass these along to get correct build scans if (System.getenv("JENKINS_URL") != null) { - Stream.of("JOB_NAME", "JENKINS_URL", "BUILD_NUMBER", "BUILD_URL").forEach(name -> - vagrant.vmEnv(name, System.getenv(name)) - ); + Stream.of("JOB_NAME", "JENKINS_URL", "BUILD_NUMBER", "BUILD_URL").forEach(name -> vagrant.vmEnv(name, System.getenv(name))); } vagrant.setIsWindowsVM(isWindows(project)); return Arrays.asList(systemJdk, gradleJdk); } - private static Object convertPath(Project project, VagrantExtension vagrant, Jdk jdk, - String additionaLinux, String additionalWindows) { + private static Object convertPath(Project project, VagrantExtension vagrant, Jdk jdk, String additionaLinux, String additionalWindows) { return new Object() { @Override public String toString() { @@ -241,110 +246,111 @@ public String toString() { private static TaskProvider configureCopyDistributionsTask(Project project, Provider distributionsDir) { // temporary, until we have tasks per distribution - return project.getTasks().register(COPY_DISTRIBUTIONS_TASK, Copy.class, - t -> { - t.into(distributionsDir); - t.from(project.getConfigurations().getByName(DISTRIBUTIONS_CONFIGURATION)); - - Path distributionsPath = distributionsDir.get().getAsFile().toPath(); - TaskInputs inputs = t.getInputs(); - inputs.property("version", VersionProperties.getElasticsearch()); - t.doLast(action -> { - try { - Files.writeString(distributionsPath.resolve("version"), VersionProperties.getElasticsearch()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); + return project.getTasks().register(COPY_DISTRIBUTIONS_TASK, Copy.class, t -> { + t.into(distributionsDir); + t.from(project.getConfigurations().getByName(DISTRIBUTIONS_CONFIGURATION)); + + Path distributionsPath = distributionsDir.get().getAsFile().toPath(); + TaskInputs inputs = t.getInputs(); + inputs.property("version", VersionProperties.getElasticsearch()); + t.doLast(action -> { + try { + Files.writeString(distributionsPath.resolve("version"), VersionProperties.getElasticsearch()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); }); } - private static TaskProvider configureCopyUpgradeTask(Project project, Version upgradeVersion, - Provider upgradeDir) { + private static TaskProvider configureCopyUpgradeTask(Project project, Version upgradeVersion, Provider upgradeDir) { // temporary, until we have tasks per distribution - return project.getTasks().register(COPY_UPGRADE_TASK, Copy.class, - t -> { - t.into(upgradeDir); - t.from(project.getConfigurations().getByName(UPGRADE_CONFIGURATION)); - - Path upgradePath = upgradeDir.get().getAsFile().toPath(); - - // write bwc version, and append -SNAPSHOT if it is an unreleased version - ExtraPropertiesExtension extraProperties = 
project.getExtensions().getByType(ExtraPropertiesExtension.class); - BwcVersions bwcVersions = (BwcVersions) extraProperties.get("bwcVersions"); - final String upgradeFromVersion; - if (bwcVersions.unreleasedInfo(upgradeVersion) != null) { - upgradeFromVersion = upgradeVersion.toString() + "-SNAPSHOT"; - } else { - upgradeFromVersion = upgradeVersion.toString(); + return project.getTasks().register(COPY_UPGRADE_TASK, Copy.class, t -> { + t.into(upgradeDir); + t.from(project.getConfigurations().getByName(UPGRADE_CONFIGURATION)); + + Path upgradePath = upgradeDir.get().getAsFile().toPath(); + + // write bwc version, and append -SNAPSHOT if it is an unreleased version + ExtraPropertiesExtension extraProperties = project.getExtensions().getByType(ExtraPropertiesExtension.class); + BwcVersions bwcVersions = (BwcVersions) extraProperties.get("bwcVersions"); + final String upgradeFromVersion; + if (bwcVersions.unreleasedInfo(upgradeVersion) != null) { + upgradeFromVersion = upgradeVersion.toString() + "-SNAPSHOT"; + } else { + upgradeFromVersion = upgradeVersion.toString(); + } + TaskInputs inputs = t.getInputs(); + inputs.property("upgrade_from_version", upgradeFromVersion); + // TODO: this is serializable, need to think how to represent this as an input + // inputs.property("bwc_versions", bwcVersions); + t.doLast(action -> { + try { + Files.writeString(upgradePath.resolve("upgrade_from_version"), upgradeFromVersion); + // this is always true, but bats tests rely on it. It is just temporary until bats is removed. + Files.writeString(upgradePath.resolve("upgrade_is_oss"), ""); + } catch (IOException e) { + throw new UncheckedIOException(e); } - TaskInputs inputs = t.getInputs(); - inputs.property("upgrade_from_version", upgradeFromVersion); - // TODO: this is serializable, need to think how to represent this as an input - //inputs.property("bwc_versions", bwcVersions); - t.doLast(action -> { - try { - Files.writeString(upgradePath.resolve("upgrade_from_version"), upgradeFromVersion); - // this is always true, but bats tests rely on it. It is just temporary until bats is removed. 
- Files.writeString(upgradePath.resolve("upgrade_is_oss"), ""); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); }); + }); } private static TaskProvider configureCopyPluginsTask(Project project, Provider pluginsDir) { Configuration pluginsConfiguration = project.getConfigurations().create(PLUGINS_CONFIGURATION); // temporary, until we have tasks per distribution - return project.getTasks().register(COPY_PLUGINS_TASK, Copy.class, - t -> { - t.into(pluginsDir); - t.from(pluginsConfiguration); - }); + return project.getTasks().register(COPY_PLUGINS_TASK, Copy.class, t -> { + t.into(pluginsDir); + t.from(pluginsConfiguration); + }); } - private static TaskProvider configureVMWrapperTask(Project project, String type, String destructiveTaskPath, - List dependsOn) { + private static TaskProvider configureVMWrapperTask( + Project project, + String type, + String destructiveTaskPath, + List dependsOn + ) { int taskNameStart = destructiveTaskPath.lastIndexOf(':') + "destructive".length() + 1; String taskname = destructiveTaskPath.substring(taskNameStart); taskname = taskname.substring(0, 1).toLowerCase(Locale.ROOT) + taskname.substring(1); - return project.getTasks().register(taskname, GradleDistroTestTask.class, - t -> { - t.setGroup(JavaBasePlugin.VERIFICATION_GROUP); - t.setDescription("Runs " + type + " tests within vagrant"); - t.setTaskName(destructiveTaskPath); - t.extraArg("-D'" + IN_VM_SYSPROP + "'"); - t.dependsOn(dependsOn); - }); + return project.getTasks().register(taskname, GradleDistroTestTask.class, t -> { + t.setGroup(JavaBasePlugin.VERIFICATION_GROUP); + t.setDescription("Runs " + type + " tests within vagrant"); + t.setTaskName(destructiveTaskPath); + t.extraArg("-D'" + IN_VM_SYSPROP + "'"); + t.dependsOn(dependsOn); + }); } private static TaskProvider configureDistroTest(Project project, ElasticsearchDistribution distribution) { - return project.getTasks().register(destructiveDistroTestTaskName(distribution), Test.class, - t -> { - t.setMaxParallelForks(1); - t.setWorkingDir(project.getProjectDir()); - t.systemProperty(DISTRIBUTION_SYSPROP, distribution.toString()); - if (System.getProperty(IN_VM_SYSPROP) == null) { - t.dependsOn(distribution); - } - }); + return project.getTasks().register(destructiveDistroTestTaskName(distribution), Test.class, t -> { + t.setMaxParallelForks(1); + t.setWorkingDir(project.getProjectDir()); + t.systemProperty(DISTRIBUTION_SYSPROP, distribution.toString()); + if (System.getProperty(IN_VM_SYSPROP) == null) { + t.dependsOn(distribution); + } + }); } - private static TaskProvider configureBatsTest(Project project, String type, Provider distributionsDir, - Object... deps) { - return project.getTasks().register("destructiveBatsTest." + type, BatsTestTask.class, - t -> { - Directory batsDir = project.getLayout().getProjectDirectory().dir("bats"); - t.setTestsDir(batsDir.dir(type)); - t.setUtilsDir(batsDir.dir("utils")); - t.setDistributionsDir(distributionsDir); - t.setPackageName("elasticsearch" + (type.equals("oss") ? "-oss" : "")); - if (System.getProperty(IN_VM_SYSPROP) == null) { - t.dependsOn(deps); - } - }); + private static TaskProvider configureBatsTest( + Project project, + String type, + Provider distributionsDir, + Object... deps + ) { + return project.getTasks().register("destructiveBatsTest." 
+ type, BatsTestTask.class, t -> { + Directory batsDir = project.getLayout().getProjectDirectory().dir("bats"); + t.setTestsDir(batsDir.dir(type)); + t.setUtilsDir(batsDir.dir("utils")); + t.setDistributionsDir(distributionsDir); + t.setPackageName("elasticsearch" + (type.equals("oss") ? "-oss" : "")); + if (System.getProperty(IN_VM_SYSPROP) == null) { + t.dependsOn(deps); + } + }); } private List configureDistributions(Project project, Version upgradeVersion, boolean runDockerTests) { @@ -384,8 +390,15 @@ private List configureDistributions(Project project, for (Platform platform : Arrays.asList(Platform.LINUX, Platform.WINDOWS)) { for (Flavor flavor : Flavor.values()) { for (boolean bundledJdk : Arrays.asList(true, false)) { - addDistro(distributions, Type.ARCHIVE, platform, flavor, bundledJdk, - VersionProperties.getElasticsearch(), currentDistros); + addDistro( + distributions, + Type.ARCHIVE, + platform, + flavor, + bundledJdk, + VersionProperties.getElasticsearch(), + currentDistros + ); } } } @@ -399,16 +412,23 @@ private List configureDistributions(Project project, packagingConfig.setExtendsFrom(distroConfigs); Configuration packagingUpgradeConfig = project.getConfigurations().create(UPGRADE_CONFIGURATION); - List distroUpgradeConfigs = upgradeDistros.stream().map(ElasticsearchDistribution::getConfiguration) + List distroUpgradeConfigs = upgradeDistros.stream() + .map(ElasticsearchDistribution::getConfiguration) .collect(Collectors.toList()); packagingUpgradeConfig.setExtendsFrom(distroUpgradeConfigs); return currentDistros; } - private static void addDistro(NamedDomainObjectContainer distributions, - Type type, Platform platform, Flavor flavor, boolean bundledJdk, String version, - List container) { + private static void addDistro( + NamedDomainObjectContainer distributions, + Type type, + Platform platform, + Flavor flavor, + boolean bundledJdk, + String version, + List container + ) { String name = distroId(type, platform, flavor, bundledJdk) + "-" + version; if (distributions.findByName(name) != null) { @@ -437,11 +457,7 @@ private static String distroId(Type type, Platform platform, Flavor flavor, bool private static String destructiveDistroTestTaskName(ElasticsearchDistribution distro) { Type type = distro.getType(); - return "destructiveDistroTest." + distroId( - type, - distro.getPlatform(), - distro.getFlavor(), - distro.getBundledJdk()); + return "destructiveDistroTest." + distroId(type, distro.getPlatform(), distro.getFlavor(), distro.getBundledJdk()); } static Map parseOsRelease(final List osReleaseLines) { @@ -485,7 +501,6 @@ private static List getLinuxExclusionList(Project project) { * method determines whether the Docker tests should be run on the host * OS. Essentially, unless an OS and version is specifically excluded, we expect * to be able to run Docker and test the Docker images. 
- * @param project */ private static boolean shouldRunDockerTests(Project project) { switch (OS.current()) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java index ce806b48e56a7..37811f335763c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java @@ -193,12 +193,12 @@ public String getFullName() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; Descriptor that = (Descriptor) o; - return Objects.equals(name, that.name) && - Objects.equals(className, that.className) && - Objects.equals(parent, that.parent); + return Objects.equals(name, that.name) && Objects.equals(className, that.className) && Objects.equals(parent, that.parent); } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/JNAKernel32Library.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/JNAKernel32Library.java similarity index 100% rename from buildSrc/src/main/groovy/org/elasticsearch/gradle/test/JNAKernel32Library.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/test/JNAKernel32Library.java diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 32789f69e9f2f..9359f59d762b8 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -62,22 +62,16 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); private final Project project; private final ReaperService reaper; - private int nodeIndex = 0; + private int nodeIndex = 0; - public ElasticsearchCluster(String path, String clusterName, Project project, - ReaperService reaper, File workingDirBase) { + public ElasticsearchCluster(String path, String clusterName, Project project, ReaperService reaper, File workingDirBase) { this.path = path; this.clusterName = clusterName; this.project = project; this.reaper = reaper; this.workingDirBase = workingDirBase; this.nodes = project.container(ElasticsearchNode.class); - this.nodes.add( - new ElasticsearchNode( - path, clusterName + "-0", - project, reaper, workingDirBase - ) - ); + this.nodes.add(new ElasticsearchNode(path, clusterName + "-0", project, reaper, workingDirBase)); // configure the cluster name eagerly so nodes know about it this.nodes.all((node) -> node.defaultConfig.put("cluster.name", safeName(clusterName))); @@ -97,10 +91,8 @@ public void setNumberOfNodes(int numberOfNodes) { ); } - for (int i = nodes.size() ; i < numberOfNodes; i++) { - this.nodes.add(new ElasticsearchNode( - path, clusterName + "-" + i, project, reaper, workingDirBase - )); + for (int i = nodes.size(); i < numberOfNodes; i++) { + this.nodes.add(new ElasticsearchNode(path, clusterName + "-" + i, project, reaper, workingDirBase)); } } @@ -274,7 +266,8 @@ private void commonNodeConfig() { // Can only configure master nodes if we have node names defined if (nodeNames != null) { if 
(node.getVersion().onOrAfter("7.0.0")) { - node.defaultConfig.keySet().stream() + node.defaultConfig.keySet() + .stream() .filter(name -> name.startsWith("discovery.zen.")) .collect(Collectors.toList()) .forEach(node.defaultConfig::remove); @@ -413,9 +406,7 @@ public boolean isProcessAlive() { public ElasticsearchNode singleNode() { if (nodes.size() != 1) { - throw new IllegalStateException( - "Can't treat " + this + " as single node as it has " + nodes.size() + " nodes" - ); + throw new IllegalStateException("Can't treat " + this + " as single node as it has " + nodes.size() + " nodes"); } return getFirstNode(); } @@ -456,11 +447,12 @@ public NamedDomainObjectContainer getNodes() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; ElasticsearchCluster that = (ElasticsearchCluster) o; - return Objects.equals(clusterName, that.clusterName) && - Objects.equals(path, that.path); + return Objects.equals(clusterName, that.clusterName) && Objects.equals(path, that.path); } @Override diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 36656a2516634..aeb8e3e359d7a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -430,26 +430,20 @@ public synchronized void start() { if (plugins.isEmpty() == false) { logToProcessStdout("Installing " + plugins.size() + " plugins"); - plugins.forEach(plugin -> runElasticsearchBinScript( - "elasticsearch-plugin", - "install", "--batch", plugin.toString()) - ); + plugins.forEach(plugin -> runElasticsearchBinScript("elasticsearch-plugin", "install", "--batch", plugin.toString())); } if (getVersion().before("6.3.0") && testDistribution == TestDistribution.DEFAULT) { LOGGER.info("emulating the {} flavor for {} by installing x-pack", testDistribution, getVersion()); - runElasticsearchBinScript( - "elasticsearch-plugin", - "install", "--batch", "x-pack" - ); + runElasticsearchBinScript("elasticsearch-plugin", "install", "--batch", "x-pack"); } if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files"); runElasticsearchBinScript("elasticsearch-keystore", "create"); - keystoreSettings.forEach((key, value) -> - runElasticsearchBinScriptWithInput(value.toString(), "elasticsearch-keystore", "add", "-x", key) + keystoreSettings.forEach( + (key, value) -> runElasticsearchBinScriptWithInput(value.toString(), "elasticsearch-keystore", "add", "-x", key) ); for (Map.Entry entry : keystoreFiles.entrySet()) { @@ -473,12 +467,12 @@ public synchronized void start() { if (credentials.isEmpty() == false) { logToProcessStdout("Setting up " + credentials.size() + " users"); - credentials.forEach(paramMap -> runElasticsearchBinScript( - getVersion().onOrAfter("6.3.0") ? "elasticsearch-users" : "x-pack/users", - paramMap.entrySet().stream() - .flatMap(entry -> Stream.of(entry.getKey(), entry.getValue())) - .toArray(String[]::new) - )); + credentials.forEach( + paramMap -> runElasticsearchBinScript( + getVersion().onOrAfter("6.3.0") ? 
"elasticsearch-users" : "x-pack/users", + paramMap.entrySet().stream().flatMap(entry -> Stream.of(entry.getKey(), entry.getValue())).toArray(String[]::new) + ) + ); } if (cliSetup.isEmpty() == false) { @@ -501,7 +495,8 @@ private void logToProcessStdout(String message) { Files.write( esStdoutFile, ("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8), - StandardOpenOption.CREATE, StandardOpenOption.APPEND + StandardOpenOption.CREATE, + StandardOpenOption.APPEND ); } catch (IOException e) { throw new UncheckedIOException(e); @@ -534,8 +529,7 @@ private void copyExtraConfigFiles() { } extraConfigFiles.forEach((destination, from) -> { if (Files.exists(from.toPath()) == false) { - throw new TestClustersException("Can't create extra config file from " + from + " for " + this + - " as it does not exist"); + throw new TestClustersException("Can't create extra config file from " + from + " for " + this + " as it does not exist"); } Path dst = configFile.getParent().resolve(destination); try { @@ -553,7 +547,7 @@ private void copyExtraConfigFiles() { * //TODO: Remove this when system modules are available */ private void copyExtraJars() { - if (extraJarFiles.isEmpty() == false){ + if (extraJarFiles.isEmpty() == false) { logToProcessStdout("Setting up " + extraJarFiles.size() + " additional jar dependencies"); } extraJarFiles.forEach(from -> { @@ -571,9 +565,8 @@ private void installModules() { if (testDistribution == TestDistribution.INTEG_TEST) { logToProcessStdout("Installing " + modules.size() + "modules"); for (File module : modules) { - Path destination = getDistroDir().resolve("modules").resolve(module.getName().replace(".zip", "") - .replace("-" + getVersion(), "") - .replace("-SNAPSHOT", "")); + Path destination = getDistroDir().resolve("modules") + .resolve(module.getName().replace(".zip", "").replace("-" + getVersion(), "").replace("-SNAPSHOT", "")); // only install modules that are not already bundled with the integ-test distribution if (Files.exists(destination) == false) { @@ -590,16 +583,14 @@ private void installModules() { } } } else { - LOGGER.info("Not installing " + modules.size() + "(s) since the " + distributions + " distribution already " + - "has them"); + LOGGER.info("Not installing " + modules.size() + "(s) since the " + distributions + " distribution already has them"); } } @Override public void extraConfigFile(String destination, File from) { if (destination.contains("..")) { - throw new IllegalArgumentException("extra config file destination can't be relative, was " + destination + - " for " + this); + throw new IllegalArgumentException("extra config file destination can't be relative, was " + destination + " for " + this); } extraConfigFiles.put(destination, from); } @@ -607,8 +598,7 @@ public void extraConfigFile(String destination, File from) { @Override public void extraConfigFile(String destination, File from, PropertyNormalization normalization) { if (destination.contains("..")) { - throw new IllegalArgumentException("extra config file destination can't be relative, was " + destination + - " for " + this); + throw new IllegalArgumentException("extra config file destination can't be relative, was " + destination + " for " + this); } extraConfigFiles.put(destination, from, normalization); } @@ -638,37 +628,26 @@ public void user(Map userSpec) { } private void runElasticsearchBinScriptWithInput(String input, String tool, CharSequence... 
args) { - if ( - Files.exists(getDistroDir().resolve("bin").resolve(tool)) == false && - Files.exists(getDistroDir().resolve("bin").resolve(tool + ".bat")) == false - ) { - throw new TestClustersException("Can't run bin script: `" + tool + "` does not exist. " + - "Is this the distribution you expect it to be ?"); + if (Files.exists(getDistroDir().resolve("bin").resolve(tool)) == false + && Files.exists(getDistroDir().resolve("bin").resolve(tool + ".bat")) == false) { + throw new TestClustersException( + "Can't run bin script: `" + tool + "` does not exist. " + "Is this the distribution you expect it to be ?" + ); } try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { LoggedExec.exec(project, spec -> { spec.setEnvironment(getESEnvironment()); spec.workingDir(getDistroDir()); - spec.executable( - OS.conditionalString() - .onUnix(() -> "./bin/" + tool) - .onWindows(() -> "cmd") - .supply() - ); - spec.args( - OS.>conditional() - .onWindows(() -> { - ArrayList result = new ArrayList<>(); - result.add("/c"); - result.add("bin\\" + tool + ".bat"); - for (CharSequence arg : args) { - result.add(arg); - } - return result; - }) - .onUnix(() -> Arrays.asList(args)) - .supply() - ); + spec.executable(OS.conditionalString().onUnix(() -> "./bin/" + tool).onWindows(() -> "cmd").supply()); + spec.args(OS.>conditional().onWindows(() -> { + ArrayList result = new ArrayList<>(); + result.add("/c"); + result.add("bin\\" + tool + ".bat"); + for (CharSequence arg : args) { + result.add(arg); + } + return result; + }).onUnix(() -> Arrays.asList(args)).supply()); spec.setStandardInput(byteArrayInputStream); }); @@ -683,38 +662,38 @@ private void runElasticsearchBinScript(String tool, CharSequence... args) { private Map getESEnvironment() { Map defaultEnv = new HashMap<>(); - if ( getJavaHome() != null) { + if (getJavaHome() != null) { defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath()); } defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { - systemPropertiesString = " " + systemProperties.entrySet().stream() - .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) - // ES_PATH_CONF is also set as an environment variable and for a reference to ${ES_PATH_CONF} - // to work ES_JAVA_OPTS, we need to make sure that ES_PATH_CONF before ES_JAVA_OPTS. Instead, - // we replace the reference with the actual value in other environment variables - .map(p -> p.replace("${ES_PATH_CONF}", configFile.getParent().toString())) - .collect(Collectors.joining(" ")); + systemPropertiesString = " " + + systemProperties.entrySet() + .stream() + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) + // ES_PATH_CONF is also set as an environment variable and for a reference to ${ES_PATH_CONF} + // to work ES_JAVA_OPTS, we need to make sure that ES_PATH_CONF before ES_JAVA_OPTS. 
Instead, + // we replace the reference with the actual value in other environment variables + .map(p -> p.replace("${ES_PATH_CONF}", configFile.getParent().toString())) + .collect(Collectors.joining(" ")); } String jvmArgsString = ""; if (jvmArgs.isEmpty() == false) { - jvmArgsString = " " + jvmArgs.stream() - .peek(argument -> { - if (argument.toString().startsWith("-D")) { - throw new TestClustersException("Invalid jvm argument `" + argument + - "` configure as systemProperty instead for " + this - ); - } - }) - .collect(Collectors.joining(" ")); + jvmArgsString = " " + jvmArgs.stream().peek(argument -> { + if (argument.toString().startsWith("-D")) { + throw new TestClustersException( + "Invalid jvm argument `" + argument + "` configure as systemProperty instead for " + this + ); + } + }).collect(Collectors.joining(" ")); } String heapSize = System.getProperty("tests.heap.size", "512m"); - defaultEnv.put("ES_JAVA_OPTS", "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " + - systemPropertiesString + " " + - jvmArgsString + " " + - // Support passing in additional JVM arguments - System.getProperty("tests.jvm.argline", "") + defaultEnv.put( + "ES_JAVA_OPTS", + "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " + systemPropertiesString + " " + jvmArgsString + " " + + // Support passing in additional JVM arguments + System.getProperty("tests.jvm.argline", "") ); defaultEnv.put("ES_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR @@ -727,9 +706,7 @@ private Map getESEnvironment() { Set commonKeys = new HashSet<>(environment.keySet()); commonKeys.retainAll(defaultEnv.keySet()); if (commonKeys.isEmpty() == false) { - throw new IllegalStateException( - "testcluster does not allow overwriting the following env vars " + commonKeys + " for " + this - ); + throw new IllegalStateException("testcluster does not allow overwriting the following env vars " + commonKeys + " for " + this); } environment.forEach((key, value) -> defaultEnv.put(key, value.toString())); @@ -746,7 +723,7 @@ private void startElasticsearchProcess() { processBuilder.command(command); processBuilder.directory(workingDir.toFile()); Map environment = processBuilder.environment(); - // Don't inherit anything from the environment for as that would lack reproducibility + // Don't inherit anything from the environment for as that would lack reproducibility environment.clear(); environment.putAll(getESEnvironment()); @@ -858,10 +835,7 @@ private void stopHandle(ProcessHandle processHandle, boolean forcibly) { // and in that case the ML processes will be grandchildren of the wrapper. List children = processHandle.children().collect(Collectors.toList()); try { - logProcessInfo( - "Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":", - processHandle.info() - ); + logProcessInfo("Terminating elasticsearch process" + (forcibly ? 
" forcibly " : "gracefully") + ":", processHandle.info()); if (forcibly) { processHandle.destroyForcibly(); @@ -871,8 +845,7 @@ private void stopHandle(ProcessHandle processHandle, boolean forcibly) { if (processHandle.isAlive() == false) { return; } - LOGGER.info("process did not terminate after {} {}, stopping it forcefully", - ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT); + LOGGER.info("process did not terminate after {} {}, stopping it forcefully", ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT); processHandle.destroyForcibly(); } @@ -886,11 +859,11 @@ private void stopHandle(ProcessHandle processHandle, boolean forcibly) { } private void logProcessInfo(String prefix, ProcessHandle.Info info) { - LOGGER.info(prefix + " commandLine:`{}` command:`{}` args:`{}`", - info.commandLine().orElse("-"), info.command().orElse("-"), - Arrays.stream(info.arguments().orElse(new String[]{})) - .map(each -> "'" + each + "'") - .collect(Collectors.joining(" ")) + LOGGER.info( + prefix + " commandLine:`{}` command:`{}` args:`{}`", + info.commandLine().orElse("-"), + info.command().orElse("-"), + Arrays.stream(info.arguments().orElse(new String[] {})).map(each -> "'" + each + "'").collect(Collectors.joining(" ")) ); } @@ -898,7 +871,7 @@ private void logFileContents(String description, Path from) { final Map errorsAndWarnings = new LinkedHashMap<>(); LinkedList ring = new LinkedList<>(); try (LineNumberReader reader = new LineNumberReader(Files.newBufferedReader(from))) { - for (String line = reader.readLine(); line != null ; line = reader.readLine()) { + for (String line = reader.readLine(); line != null; line = reader.readLine()) { final String lineToAdd; if (ring.isEmpty()) { lineToAdd = line; @@ -908,12 +881,9 @@ private void logFileContents(String description, Path from) { // check to see if the previous message (possibly combined from multiple lines) was an error or // warning as we want to show all of them String previousMessage = normalizeLogLine(ring.getLast()); - if (MESSAGES_WE_DONT_CARE_ABOUT.stream().noneMatch(previousMessage::contains) && - (previousMessage.contains("ERROR") || previousMessage.contains("WARN"))) { - errorsAndWarnings.put( - previousMessage, - errorsAndWarnings.getOrDefault(previousMessage, 0) + 1 - ); + if (MESSAGES_WE_DONT_CARE_ABOUT.stream().noneMatch(previousMessage::contains) + && (previousMessage.contains("ERROR") || previousMessage.contains("WARN"))) { + errorsAndWarnings.put(previousMessage, errorsAndWarnings.getOrDefault(previousMessage, 0) + 1); } } else { // We combine multi line log messages to make sure we never break exceptions apart @@ -1026,9 +996,7 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { Files.createLink(destination, source); } catch (IOException e) { // Note does not work for network drives, e.g. 
Vagrant - throw new UncheckedIOException( - "Failed to create hard link " + destination + " pointing to " + source, e - ); + throw new UncheckedIOException("Failed to create hard link " + destination + " pointing to " + source, e); } } }); @@ -1085,21 +1053,17 @@ private void createConfiguration() { ); } // Make sure no duplicate config keys - settings.keySet().stream() - .filter(OVERRIDABLE_SETTINGS::contains) - .forEach(defaultConfig::remove); + settings.keySet().stream().filter(OVERRIDABLE_SETTINGS::contains).forEach(defaultConfig::remove); try { Files.write( configFile, - Stream.concat( - settings.entrySet().stream(), - defaultConfig.entrySet().stream() - ) + Stream.concat(settings.entrySet().stream(), defaultConfig.entrySet().stream()) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) .getBytes(StandardCharsets.UTF_8), - StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE ); final List configFiles; @@ -1129,9 +1093,7 @@ private List getTransportPortInternal() { try { return readPortsFile(transportPortFile); } catch (IOException e) { - throw new UncheckedIOException( - "Failed to read transport ports file: " + transportPortFile + " for " + this, e - ); + throw new UncheckedIOException("Failed to read transport ports file: " + transportPortFile + " for " + this, e); } } @@ -1139,9 +1101,7 @@ private List getHttpPortInternal() { try { return readPortsFile(httpPortsFile); } catch (IOException e) { - throw new UncheckedIOException( - "Failed to read http ports file: " + httpPortsFile + " for " + this, e - ); + throw new UncheckedIOException("Failed to read http ports file: " + httpPortsFile + " for " + this, e); } } @@ -1156,10 +1116,7 @@ private Path getExtractedDistributionDir() { } private List getInstalledFileSet(Action filter) { - return Stream.concat( - plugins.stream().filter(uri -> uri.getScheme().equalsIgnoreCase("file")).map(File::new), - modules.stream() - ) + return Stream.concat(plugins.stream().filter(uri -> uri.getScheme().equalsIgnoreCase("file")).map(File::new), modules.stream()) .filter(File::exists) // TODO: We may be able to simplify this with Gradle 5.6 // https://docs.gradle.org/nightly/release-notes.html#improved-handling-of-zip-archives-on-classpaths @@ -1200,11 +1157,7 @@ public Set getDistributionFiles() { private Set getDistributionFiles(Action patternFilter) { Set files = new TreeSet<>(); for (ElasticsearchDistribution distribution : distributions) { - files.addAll( - project.fileTree(Paths.get(distribution.getExtracted().toString())) - .matching(patternFilter) - .getFiles() - ); + files.addAll(project.fileTree(Paths.get(distribution.getExtracted().toString())).matching(patternFilter).getFiles()); } return files; } @@ -1252,40 +1205,27 @@ public List getExtraConfigFiles() { @Override @Internal public boolean isProcessAlive() { - requireNonNull( - esProcess, - "Can't wait for `" + this + "` as it's not started. Does the task have `useCluster` ?" - ); + requireNonNull(esProcess, "Can't wait for `" + this + "` as it's not started. 
Does the task have `useCluster` ?"); return esProcess.isAlive(); } void waitForAllConditions() { - waitForConditions( - waitConditions, - System.currentTimeMillis(), - NODE_UP_TIMEOUT_UNIT.toMillis(NODE_UP_TIMEOUT) + - // Installing plugins at config time and loading them when nodes start requires additional time we need to - // account for - ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis(ADDITIONAL_CONFIG_TIMEOUT * - ( - plugins.size() + - keystoreFiles.size() + - keystoreSettings.size() + - credentials.size() - ) - ), - TimeUnit.MILLISECONDS, - this - ); + waitForConditions(waitConditions, System.currentTimeMillis(), NODE_UP_TIMEOUT_UNIT.toMillis(NODE_UP_TIMEOUT) + + // Installing plugins at config time and loading them when nodes start requires additional time we need to + // account for + ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis( + ADDITIONAL_CONFIG_TIMEOUT * (plugins.size() + keystoreFiles.size() + keystoreSettings.size() + credentials.size()) + ), TimeUnit.MILLISECONDS, this); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; ElasticsearchNode that = (ElasticsearchNode) o; - return Objects.equals(name, that.name) && - Objects.equals(path, that.path); + return Objects.equals(name, that.name) && Objects.equals(path, that.path); } @Override @@ -1318,37 +1258,23 @@ private boolean checkPortsFilesExistWithDelay(TestClusterConfiguration node) { @Internal public boolean isHttpSslEnabled() { - return Boolean.valueOf( - settings.getOrDefault("xpack.security.http.ssl.enabled", "false").toString() - ); + return Boolean.valueOf(settings.getOrDefault("xpack.security.http.ssl.enabled", "false").toString()); } void configureHttpWait(WaitForHttpResource wait) { if (settings.containsKey("xpack.security.http.ssl.certificate_authorities")) { wait.setCertificateAuthorities( - getConfigDir() - .resolve(settings.get("xpack.security.http.ssl.certificate_authorities").toString()) - .toFile() + getConfigDir().resolve(settings.get("xpack.security.http.ssl.certificate_authorities").toString()).toFile() ); } if (settings.containsKey("xpack.security.http.ssl.certificate")) { - wait.setCertificateAuthorities( - getConfigDir() - .resolve(settings.get("xpack.security.http.ssl.certificate").toString()) - .toFile() - ); + wait.setCertificateAuthorities(getConfigDir().resolve(settings.get("xpack.security.http.ssl.certificate").toString()).toFile()); } if (settings.containsKey("xpack.security.http.ssl.keystore.path")) { - wait.setTrustStoreFile( - getConfigDir() - .resolve(settings.get("xpack.security.http.ssl.keystore.path").toString()) - .toFile() - ); + wait.setTrustStoreFile(getConfigDir().resolve(settings.get("xpack.security.http.ssl.keystore.path").toString()).toFile()); } if (keystoreSettings.containsKey("xpack.security.http.ssl.keystore.secure_password")) { - wait.setTrustStorePassword( - keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").toString() - ); + wait.setTrustStorePassword(keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").toString()); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RestTestRunnerTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RestTestRunnerTask.java index 9698e4a664b74..f4de829009de3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RestTestRunnerTask.java +++ 
b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RestTestRunnerTask.java @@ -19,17 +19,21 @@ public class RestTestRunnerTask extends Test implements TestClustersAware { private Collection clusters = new HashSet<>(); public RestTestRunnerTask() { - this.getOutputs().doNotCacheIf("Caching disabled for this task since it uses a cluster shared by other tasks", - /* - * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster they - * execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To avoid any - * undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between multiple tasks. - */ - t -> getProject().getTasks().withType(RestTestRunnerTask.class) - .stream() - .filter(task -> task != this) - .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false) - ); + this.getOutputs() + .doNotCacheIf( + "Caching disabled for this task since it uses a cluster shared by other tasks", + /* + * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster + * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To + * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between + * multiple tasks. + */ + t -> getProject().getTasks() + .withType(RestTestRunnerTask.class) + .stream() + .filter(task -> task != this) + .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false) + ); } @Override diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index cedeff6a9cf5d..99114ed15ec2c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -28,10 +28,7 @@ public class RunTask extends DefaultTestClustersTask { private Path dataDir = null; - @Option( - option = "debug-jvm", - description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch." 
- ) + @Option(option = "debug-jvm", description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch.") public void setDebug(boolean enabled) { this.debug = enabled; } @@ -41,10 +38,7 @@ public Boolean getDebug() { return debug; } - @Option( - option = "data-dir", - description = "Override the base data directory used by the testcluster" - ) + @Option(option = "data-dir", description = "Override the base data directory used by the testcluster") public void setDataDir(String dataDirStr) { dataDir = Paths.get(dataDirStr).toAbsolutePath(); } @@ -52,7 +46,9 @@ public void setDataDir(String dataDirStr) { @Input @Optional public String getDataDir() { - if (dataDir == null) { return null;} + if (dataDir == null) { + return null; + } return dataDir.toString(); } @@ -61,12 +57,16 @@ public void beforeStart() { int debugPort = 5005; int httpPort = 9200; int transportPort = 9300; - Map additionalSettings = System.getProperties().entrySet().stream() + Map additionalSettings = System.getProperties() + .entrySet() + .stream() .filter(entry -> entry.getKey().toString().startsWith(CUSTOM_SETTINGS_PREFIX)) - .collect(Collectors.toMap( - entry -> entry.getKey().toString().substring(CUSTOM_SETTINGS_PREFIX.length()), - entry -> entry.getValue().toString() - )); + .collect( + Collectors.toMap( + entry -> entry.getKey().toString().substring(CUSTOM_SETTINGS_PREFIX.length()), + entry -> entry.getValue().toString() + ) + ); boolean singleNode = getClusters().stream().flatMap(c -> c.getNodes().stream()).count() == 1; final Function getDataPath; if (singleNode) { @@ -86,10 +86,7 @@ public void beforeStart() { node.setDataPath(getDataPath.apply(node)); } if (debug) { - logger.lifecycle( - "Running elasticsearch in debug mode, {} suspending until connected on debugPort {}", - node, debugPort - ); + logger.lifecycle("Running elasticsearch in debug mode, {} suspending until connected on debugPort {}", node, debugPort); node.jvmArgs("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=" + debugPort); debugPort += 1; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 11e5a35604837..b484bc58a1380 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -27,13 +27,13 @@ import java.net.URI; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; - public interface TestClusterConfiguration { void setVersion(String version); @@ -113,7 +113,8 @@ public interface TestClusterConfiguration { default void waitForConditions( LinkedHashMap> waitConditions, long startedAtMillis, - long nodeUpTimeout, TimeUnit nodeUpTimeoutUnit, + long nodeUpTimeout, + TimeUnit nodeUpTimeoutUnit, TestClusterConfiguration context ) { Logger logger = Logging.getLogger(TestClusterConfiguration.class); @@ -121,17 +122,13 @@ default void waitForConditions( long thisConditionStartedAt = System.currentTimeMillis(); boolean conditionMet = false; Throwable lastException = null; - while ( - System.currentTimeMillis() - startedAtMillis < TimeUnit.MILLISECONDS.convert(nodeUpTimeout, nodeUpTimeoutUnit) - ) { + while 
(System.currentTimeMillis() - startedAtMillis < TimeUnit.MILLISECONDS.convert(nodeUpTimeout, nodeUpTimeoutUnit)) { if (context.isProcessAlive() == false) { - throw new TestClustersException( - "process was found dead while waiting for " + description + ", " + this - ); + throw new TestClustersException("process was found dead while waiting for " + description + ", " + this); } try { - if(predicate.test(context)) { + if (predicate.test(context)) { conditionMet = true; break; } @@ -142,8 +139,14 @@ default void waitForConditions( } } if (conditionMet == false) { - String message = "`" + context + "` failed to wait for " + description + " after " + - nodeUpTimeout + " " + nodeUpTimeoutUnit; + String message = String.format( + Locale.ROOT, + "`%s` failed to wait for %s after %d %s", + context, + description, + nodeUpTimeout, + nodeUpTimeoutUnit + ); if (lastException == null) { throw new TestClustersException(message); } else { @@ -160,18 +163,12 @@ default void waitForConditions( throw new TestClustersException(message + extraCause, lastException); } } - logger.info( - "{}: {} took {} seconds", - this, description, - (System.currentTimeMillis() - thisConditionStartedAt) / 1000.0 - ); + logger.info("{}: {} took {} seconds", this, description, (System.currentTimeMillis() - thisConditionStartedAt) / 1000.0); }); } default String safeName(String name) { - return name - .replaceAll("^[^a-zA-Z0-9]+", "") - .replaceAll("[^a-zA-Z0-9\\.]+", "-"); + return name.replaceAll("^[^a-zA-Z0-9]+", "").replaceAll("[^a-zA-Z0-9\\.]+", "-"); } boolean isProcessAlive(); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 8cbb4ca133214..2e212f805e99e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -12,19 +12,13 @@ interface TestClustersAware extends Task { default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { - throw new TestClustersException( - "Task " + getPath() + " can't use test cluster from" + - " another project " + cluster - ); + throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); } - cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro -> - dependsOn(distro.getExtracted()) - ); + cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro -> dependsOn(distro.getExtracted())); getClusters().add(cluster); } - default void beforeStart() { - } + default void beforeStart() {} } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index a96f8913e8208..e45119670d91f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -40,7 +40,7 @@ public class TestClustersPlugin implements Plugin { public static final String EXTENSION_NAME = "testClusters"; private static final String REGISTRY_EXTENSION_NAME = "testClustersRegistry"; - private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); + private static final Logger logger = 
Logging.getLogger(TestClustersPlugin.class); private ReaperService reaper; @@ -58,7 +58,8 @@ public void apply(Project project) { createListClustersTask(project, container); if (project.getRootProject().getExtensions().findByName(REGISTRY_EXTENSION_NAME) == null) { - TestClustersRegistry registry = project.getRootProject().getExtensions() + TestClustersRegistry registry = project.getRootProject() + .getExtensions() .create(REGISTRY_EXTENSION_NAME, TestClustersRegistry.class); // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters @@ -79,27 +80,18 @@ private NamedDomainObjectContainer createTestClustersConta // Create an extensions that allows describing clusters NamedDomainObjectContainer container = project.container( ElasticsearchCluster.class, - name -> new ElasticsearchCluster( - project.getPath(), - name, - project, - reaper, - new File(project.getBuildDir(), "testclusters") - ) + name -> new ElasticsearchCluster(project.getPath(), name, project, reaper, new File(project.getBuildDir(), "testclusters")) ); project.getExtensions().add(EXTENSION_NAME, container); return container; } - private void createListClustersTask(Project project, NamedDomainObjectContainer container) { Task listTask = project.getTasks().create(LIST_TASK_NAME); listTask.setGroup("ES cluster formation"); listTask.setDescription("Lists all ES clusters configured for this project"); - listTask.doLast((Task task) -> - container.forEach(cluster -> - logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getNumberOfNodes()) - ) + listTask.doLast( + (Task task) -> container.forEach(cluster -> logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getNumberOfNodes())) ); } @@ -107,7 +99,8 @@ private static void configureClaimClustersHook(Gradle gradle, TestClustersRegist // Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the // claims so we'll know when it's safe to stop them. 
gradle.getTaskGraph().whenReady(taskExecutionGraph -> { - taskExecutionGraph.getAllTasks().stream() + taskExecutionGraph.getAllTasks() + .stream() .filter(task -> task instanceof TestClustersAware) .map(task -> (TestClustersAware) task) .flatMap(task -> task.getClusters().stream()) @@ -116,42 +109,38 @@ private static void configureClaimClustersHook(Gradle gradle, TestClustersRegist } private static void configureStartClustersHook(Gradle gradle, TestClustersRegistry registry) { - gradle.addListener( - new TaskActionListener() { - @Override - public void beforeActions(Task task) { - if (task instanceof TestClustersAware == false) { - return; - } - // we only start the cluster before the actions, so we'll not start it if the task is up-to-date - TestClustersAware awareTask = (TestClustersAware) task; - awareTask.beforeStart(); - awareTask.getClusters().forEach(registry::maybeStartCluster); + gradle.addListener(new TaskActionListener() { + @Override + public void beforeActions(Task task) { + if (task instanceof TestClustersAware == false) { + return; } - @Override - public void afterActions(Task task) {} + // we only start the cluster before the actions, so we'll not start it if the task is up-to-date + TestClustersAware awareTask = (TestClustersAware) task; + awareTask.beforeStart(); + awareTask.getClusters().forEach(registry::maybeStartCluster); } - ); + + @Override + public void afterActions(Task task) {} + }); } private static void configureStopClustersHook(Gradle gradle, TestClustersRegistry registry) { - gradle.addListener( - new TaskExecutionListener() { - @Override - public void afterExecute(Task task, TaskState state) { - if (task instanceof TestClustersAware == false) { - return; - } - // always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been - // and caused the cluster to start. - ((TestClustersAware) task).getClusters() - .forEach(cluster -> registry.stopCluster(cluster, state.getFailure() != null)); + gradle.addListener(new TaskExecutionListener() { + @Override + public void afterExecute(Task task, TaskState state) { + if (task instanceof TestClustersAware == false) { + return; } - @Override - public void beforeExecute(Task task) {} + // always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been + // and caused the cluster to start. 
+ ((TestClustersAware) task).getClusters().forEach(cluster -> registry.stopCluster(cluster, state.getFailure() != null)); } - ); - } + @Override + public void beforeExecute(Task task) {} + }); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index fe1c75cdd44be..d78aecc82185a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -9,7 +9,7 @@ import java.util.Set; public class TestClustersRegistry { - private static final Logger logger = Logging.getLogger(TestClustersRegistry.class); + private static final Logger logger = Logging.getLogger(TestClustersRegistry.class); private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); private final Map claimsInventory = new HashMap<>(); @@ -35,10 +35,10 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { if (allowClusterToSurvive) { logger.info("Not stopping clusters, disabled by property"); // task failed or this is the last one to stop - for (int i = 1; ; i += i) { + for (int i = 1;; i += i) { logger.lifecycle( - "No more test clusters left to run, going to sleep because {} was set," + - " interrupt (^C) to stop clusters.", TESTCLUSTERS_INSPECT_FAILURE + "No more test clusters left to run, going to sleep because {} was set," + " interrupt (^C) to stop clusters.", + TESTCLUSTERS_INSPECT_FAILURE ); try { Thread.sleep(1000 * i); @@ -52,7 +52,7 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { runningClusters.remove(cluster); } } else { - int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1; + int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1; claimsInventory.put(cluster, currentClaims); if (currentClaims <= 0 && runningClusters.contains(cluster)) { @@ -62,5 +62,4 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { } } - } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java index 1521b7971333b..8d34ce5a89844 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java @@ -23,6 +23,7 @@ import org.gradle.api.Project; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Optional; @@ -54,9 +55,16 @@ public void useFixture(String path, String serviceName) { Optional otherProject = this.findOtherProjectUsingService(key); if (otherProject.isPresent()) { throw new GradleException( - "Projects " + otherProject.get() + " and " + this.project.getPath() + " both claim the "+ serviceName + - " service defined in the docker-compose.yml of " + path + "This is not supported because it breaks " + - "running in parallel. Configure dedicated services for each project and use those instead." + String.format( + Locale.ROOT, + "Projects %s and %s both claim the %s service defined in the docker-compose.yml of " + + "%sThis is not supported because it breaks running in parallel. 
Configure dedicated " + + "services for each project and use those instead.", + otherProject.get(), + this.project.getPath(), + serviceName, + path + ) ); } } @@ -66,7 +74,9 @@ private String getServiceNameKey(String fixtureProjectPath, String serviceName) } private Optional findOtherProjectUsingService(String serviceName) { - return this.project.getRootProject().getAllprojects().stream() + return this.project.getRootProject() + .getAllprojects() + .stream() .filter(p -> p.equals(this.project) == false) .filter(p -> p.getExtensions().findByType(TestFixtureExtension.class) != null) .map(project -> project.getExtensions().getByType(TestFixtureExtension.class)) @@ -90,9 +100,16 @@ private void addFixtureProject(String path) { // Check for exclusive access Optional otherProject = this.findOtherProjectUsingService(path); if (otherProject.isPresent()) { - throw new GradleException("Projects " + otherProject.get() + " and " + this.project.getPath() + " both " + - "claim all services from " + path + ". This is not supported because it breaks running in parallel. " + - "Configure specific services in docker-compose.yml for each and add the service name to `useFixture`" + throw new GradleException( + String.format( + Locale.ROOT, + "Projects %s and %s both claim all services from %s. This is not supported because it" + + " breaks running in parallel. Configure specific services in docker-compose.yml " + + "for each and add the service name to `useFixture`", + otherProject.get(), + this.project.getPath(), + path + ) ); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 93c91cbee51d3..9146c1f71beea 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -50,9 +50,7 @@ public class TestFixturesPlugin implements Plugin { public void apply(Project project) { TaskContainer tasks = project.getTasks(); - TestFixtureExtension extension = project.getExtensions().create( - "testFixtures", TestFixtureExtension.class, project - ); + TestFixtureExtension extension = project.getExtensions().create("testFixtures", TestFixtureExtension.class, project); ExtraPropertiesExtension ext = project.getExtensions().getByType(ExtraPropertiesExtension.class); File testfixturesDir = project.file("testfixtures_shared"); @@ -89,38 +87,34 @@ public void apply(Project project) { composeExtension.setUseComposeFiles(Collections.singletonList(DOCKER_COMPOSE_YML)); composeExtension.setRemoveContainers(true); composeExtension.setExecutable( - project.file("/usr/local/bin/docker-compose").exists() ? - "/usr/local/bin/docker-compose" : "/usr/bin/docker-compose" + project.file("/usr/local/bin/docker-compose").exists() ? 
"/usr/local/bin/docker-compose" : "/usr/bin/docker-compose" ); buildFixture.dependsOn(tasks.getByName("composeUp")); pullFixture.dependsOn(tasks.getByName("composePull")); tasks.getByName("composeUp").mustRunAfter(preProcessFixture); tasks.getByName("composePull").mustRunAfter(preProcessFixture); - tasks.getByName("composeDown").doLast((task) -> { - project.delete(testfixturesDir); - }); + tasks.getByName("composeDown").doLast((task) -> { project.delete(testfixturesDir); }); configureServiceInfoForTask( postProcessFixture, project, false, - (name, port) -> postProcessFixture.getExtensions() - .getByType(ExtraPropertiesExtension.class).set(name, port) + (name, port) -> postProcessFixture.getExtensions().getByType(ExtraPropertiesExtension.class).set(name, port) ); } } else { project.afterEvaluate(spec -> { if (extension.fixtures.isEmpty()) { // if only one fixture is used, that's this one, but without a compose file that's not a valid configuration - throw new IllegalStateException("No " + DOCKER_COMPOSE_YML + " found for " + project.getPath() + - " nor does it use other fixtures."); + throw new IllegalStateException( + "No " + DOCKER_COMPOSE_YML + " found for " + project.getPath() + " nor does it use other fixtures." + ); } }); } - extension.fixtures - .matching(fixtureProject -> fixtureProject.equals(project) == false) + extension.fixtures.matching(fixtureProject -> fixtureProject.equals(project) == false) .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); conditionTaskByType(tasks, extension, Test.class); @@ -129,89 +123,81 @@ public void apply(Project project) { conditionTaskByType(tasks, extension, ComposeUp.class); if (dockerComposeSupported() == false) { - project.getLogger().warn( - "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + - "but none could be found so these will be skipped", project.getPath() - ); + project.getLogger() + .warn( + "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + + "but none could be found so these will be skipped", + project.getPath() + ); return; } - tasks.withType(Test.class, task -> - extension.fixtures.all(fixtureProject -> { - fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(task::dependsOn); - fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(task::finalizedBy); - configureServiceInfoForTask( - task, - fixtureProject, - true, - (name, host) -> - task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host) - ); - task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); - }) - ); + tasks.withType(Test.class, task -> extension.fixtures.all(fixtureProject -> { + fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(task::dependsOn); + fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(task::finalizedBy); + configureServiceInfoForTask( + task, + fixtureProject, + true, + (name, host) -> task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host) + ); + task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); + })); } private void conditionTaskByType(TaskContainer tasks, TestFixtureExtension extension, Class taskClass) { tasks.withType( taskClass, - task -> task.onlyIf(spec -> - extension.fixtures.stream() - .anyMatch(fixtureProject -> - 
fixtureProject.getTasks().getByName("buildFixture").getEnabled() == false - ) == false + task -> task.onlyIf( + spec -> extension.fixtures.stream() + .anyMatch(fixtureProject -> fixtureProject.getTasks().getByName("buildFixture").getEnabled() == false) == false ) ); } private void configureServiceInfoForTask( - Task task, Project fixtureProject, boolean enableFilter, BiConsumer consumer + Task task, + Project fixtureProject, + boolean enableFilter, + BiConsumer consumer ) { // Configure ports for the tests as system properties. // We only know these at execution time so we need to do it in doFirst TestFixtureExtension extension = task.getProject().getExtensions().getByType(TestFixtureExtension.class); task.doFirst(new Action() { - @Override - public void execute(Task theTask) { - fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() - .entrySet().stream() - .filter(entry -> enableFilter == false || - extension.isServiceRequired(entry.getKey(), fixtureProject.getPath()) - ) - .forEach(entry -> { - String service = entry.getKey(); - ServiceInfo infos = entry.getValue(); - infos.getTcpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." + service + ".tcp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - infos.getUdpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." + service + ".udp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - }); - } - } - ); + @Override + public void execute(Task theTask) { + fixtureProject.getExtensions() + .getByType(ComposeExtension.class) + .getServicesInfos() + .entrySet() + .stream() + .filter(entry -> enableFilter == false || extension.isServiceRequired(entry.getKey(), fixtureProject.getPath())) + .forEach(entry -> { + String service = entry.getKey(); + ServiceInfo infos = entry.getValue(); + infos.getTcpPorts().forEach((container, host) -> { + String name = "test.fixtures." + service + ".tcp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept(name, host); + }); + infos.getUdpPorts().forEach((container, host) -> { + String name = "test.fixtures." + service + ".udp." 
+ container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept(name, host); + }); + }); + } + }); } public static boolean dockerComposeSupported() { if (OS.current().equals(OS.WINDOWS)) { return false; } - final boolean hasDockerCompose = (new File("/usr/local/bin/docker-compose")).exists() || - (new File("/usr/bin/docker-compose").exists()); + final boolean hasDockerCompose = (new File("/usr/local/bin/docker-compose")).exists() + || (new File("/usr/bin/docker-compose").exists()); return hasDockerCompose && Boolean.parseBoolean(System.getProperty("tests.fixture.enabled", "true")); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java index f4ce626f7d600..3652944e1647e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java @@ -38,28 +38,25 @@ public static SourceSetContainer getJavaSourceSets(Project project) { } public static T maybeCreate(NamedDomainObjectContainer collection, String name) { - return Optional.ofNullable(collection.findByName(name)) - .orElse(collection.create(name)); + return Optional.ofNullable(collection.findByName(name)).orElse(collection.create(name)); } public static T maybeCreate(NamedDomainObjectContainer collection, String name, Action action) { - return Optional.ofNullable(collection.findByName(name)) - .orElseGet(() -> { - T result = collection.create(name); - action.execute(result); - return result; - }); + return Optional.ofNullable(collection.findByName(name)).orElseGet(() -> { + T result = collection.create(name); + action.execute(result); + return result; + }); } public static T maybeCreate(PolymorphicDomainObjectContainer collection, String name, Class type, Action action) { - return Optional.ofNullable(collection.findByName(name)) - .orElseGet(() -> { - T result = collection.create(name, type); - action.execute(result); - return result; - }); + return Optional.ofNullable(collection.findByName(name)).orElseGet(() -> { + T result = collection.create(name, type); + action.execute(result); + return result; + }); } @@ -83,7 +80,8 @@ public static void maybeConfigure(TaskContainer tasks, String name, Action void maybeConfigure( - TaskContainer tasks, String name, + TaskContainer tasks, + String name, Class type, Action config ) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java index ed064b4ff046b..45c3b55f61066 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java @@ -5,8 +5,7 @@ public class ClasspathUtils { - private ClasspathUtils() { - } + private ClasspathUtils() {} /** * Determine if we are running in the context of the `elastic/elasticsearch` project. 
This method will return {@code false} when diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/DockerUtils.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/DockerUtils.java index 2442ffce427c8..657087afcb848 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/DockerUtils.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/DockerUtils.java @@ -31,10 +31,7 @@ public class DockerUtils { */ public static Optional getDockerPath() { // Check if the Docker binary exists - return List.of(DOCKER_BINARIES) - .stream() - .filter(path -> new File(path).exists()) - .findFirst(); + return List.of(DOCKER_BINARIES).stream().filter(path -> new File(path).exists()).findFirst(); } /** @@ -144,7 +141,8 @@ public static void assertDockerIsAvailable(Project project, List tasks) "Docker (checked [%s]) is required to run the following task%s: \n%s", String.join(", ", DOCKER_BINARIES), tasks.size() > 1 ? "s" : "", - String.join("\n", tasks)); + String.join("\n", tasks) + ); throwDockerRequiredException(message); } @@ -153,7 +151,8 @@ public static void assertDockerIsAvailable(Project project, List tasks) Locale.ROOT, "Docker is required to run the following task%s, but it doesn't appear to be running: \n%s", tasks.size() > 1 ? "s" : "", - String.join("\n", tasks)); + String.join("\n", tasks) + ); throwDockerRequiredException(message); } @@ -161,20 +160,22 @@ public static void assertDockerIsAvailable(Project project, List tasks) final String message = String.format( Locale.ROOT, "building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]", - availability.version); + availability.version + ); throwDockerRequiredException(message); } // Some other problem, print the error final String message = String.format( Locale.ROOT, - "a problem occurred running Docker from [%s] yet it is required to run the following task%s: \n%s\n" + - "the problem is that Docker exited with exit code [%d] with standard error output [%s]", + "a problem occurred running Docker from [%s] yet it is required to run the following task%s: \n%s\n" + + "the problem is that Docker exited with exit code [%d] with standard error output [%s]", availability.path, tasks.size() > 1 ? "s" : "", String.join("\n", tasks), availability.lastCommand.exitCode, - availability.lastCommand.stderr.trim()); + availability.lastCommand.stderr.trim() + ); throwDockerRequiredException(message); } @@ -184,9 +185,12 @@ private static void throwDockerRequiredException(final String message) { private static void throwDockerRequiredException(final String message, Exception e) { throw new GradleException( - message + "\nyou can address this by attending to the reported issue, " + message + + "\nyou can address this by attending to the reported issue, " + "removing the offending tasks from being executed, " - + "or by passing -Dbuild.docker=false", e); + + "or by passing -Dbuild.docker=false", + e + ); } /** diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/BatsProgressLogger.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/BatsProgressLogger.java index 8db4e704fb469..3ace00ef4a920 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/BatsProgressLogger.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/BatsProgressLogger.java @@ -41,8 +41,9 @@ */ public class BatsProgressLogger implements UnaryOperator { - private static final Pattern lineRegex = - Pattern.compile("(?ok|not ok) \\d+(? # skip (?\\(.+\\))?)? 
\\[(?.+)\\] (?.+)"); + private static final Pattern lineRegex = Pattern.compile( + "(?ok|not ok) \\d+(? # skip (?\\(.+\\))?)? \\[(?.+)\\] (?.+)" + ); private static final Pattern startRegex = Pattern.compile("1..(\\d+)"); private final Logger logger; @@ -67,7 +68,7 @@ public String apply(String line) { testCount = Integer.parseInt(m.group(1)); int length = String.valueOf(testCount).length(); String count = "%0" + length + "d"; - countsFormat = "[" + count +"|" + count + "|" + count + "/" + count + "]"; + countsFormat = "[" + count + "|" + count + "|" + count + "/" + count + "]"; return null; } Matcher m = lineRegex.matcher(line); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantBasePlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantBasePlugin.java index f77fe982f74a3..341efedd8e851 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantBasePlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantBasePlugin.java @@ -31,6 +31,7 @@ import java.io.ByteArrayOutputStream; import java.nio.charset.StandardCharsets; import java.util.List; +import java.util.Locale; import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -49,14 +50,17 @@ public void apply(Project project) { VagrantExtension extension = project.getExtensions().create("vagrant", VagrantExtension.class, project); VagrantMachine service = project.getExtensions().create("vagrantService", VagrantMachine.class, project, extension, reaper); - project.getGradle().getTaskGraph().whenReady(graph -> - service.refs = graph.getAllTasks().stream() - .filter(t -> t instanceof VagrantShellTask) - .filter(t -> t.getProject() == project) - .count()); + project.getGradle() + .getTaskGraph() + .whenReady( + graph -> service.refs = graph.getAllTasks() + .stream() + .filter(t -> t instanceof VagrantShellTask) + .filter(t -> t.getProject() == project) + .count() + ); } - /** * Check vagrant and virtualbox versions, if any vagrant test tasks will be run. */ @@ -89,8 +93,9 @@ void checkVersion(Project project, String tool, Pattern versionRegex, int... min String output = pipe.toString(StandardCharsets.UTF_8).trim(); Matcher matcher = versionRegex.matcher(output); if (matcher.find() == false) { - throw new IllegalStateException(tool + - " version output [" + output + "] did not match regex [" + versionRegex.pattern() + "]"); + throw new IllegalStateException( + tool + " version output [" + output + "] did not match regex [" + versionRegex.pattern() + "]" + ); } String version = matcher.group(1); @@ -100,8 +105,15 @@ void checkVersion(Project project, String tool, Pattern versionRegex, int... min if (found > minVersion[i]) { break; // most significant version is good } else if (found < minVersion[i]) { - throw new IllegalStateException("Unsupported version of " + tool + ". Found [" + version + "], expected [" + - Stream.of(minVersion).map(String::valueOf).collect(Collectors.joining(".")) + "+"); + final String exceptionMessage = String.format( + Locale.ROOT, + "Unsupported version of %s. 
Found [%s], expected [%s+]", + tool, + version, + Stream.of(minVersion).map(String::valueOf).collect(Collectors.joining(".")) + ); + + throw new IllegalStateException(exceptionMessage); } // else equal, so check next element } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java index 89d95de5d4415..7576defe78308 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java @@ -78,13 +78,16 @@ public void runScript() { script.add("cd " + convertWindowsPath(getProject(), rootDir)); extension.getVmEnv().forEach((k, v) -> script.add("$Env:" + k + " = \"" + v + "\"")); script.addAll(getWindowsScript().stream().map(s -> " " + s).collect(Collectors.toList())); - script.addAll(Arrays.asList( - " exit $LASTEXITCODE", - "} catch {", - // catch if we have a failure to even run the script at all above, equivalent to set -e, sort of - " echo $_.Exception.Message", - " exit 1", - "}")); + script.addAll( + Arrays.asList( + " exit $LASTEXITCODE", + "} catch {", + // catch if we have a failure to even run the script at all above, equivalent to set -e, sort of + " echo $_.Exception.Message", + " exit 1", + "}" + ) + ); spec.setArgs("--elevated", "--command", String.join("\n", script)); spec.setProgressHandler(progressHandler); }); @@ -118,5 +121,4 @@ public void runScript() { } } - } diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/JdkJarHellCheck.java index 7a2504efdd0fc..2fda233f1db2f 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/JdkJarHellCheck.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/JdkJarHellCheck.java @@ -45,11 +45,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { String entry = root.relativize(file).toString().replace('\\', '/'); if (entry.endsWith(".class") && entry.endsWith("module-info.class") == false) { if (ext.getResource(entry) != null) { - detected.add( - entry - .replace("/", ".") - .replace(".class","") - ); + detected.add(entry.replace("/", ".").replace(".class", "")); } } return FileVisitResult.CONTINUE; diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LazyFileOutputStream.java index d3101868e84b6..e0da9afc8ab12 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LazyFileOutputStream.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LazyFileOutputStream.java @@ -37,11 +37,13 @@ private void bootstrap() throws IOException { file.getParentFile().mkdirs(); delegate = new FileOutputStream(file); } + @Override public void write(int b) throws IOException { bootstrap(); delegate.write(b); } + @Override public void write(byte b[], int off, int len) throws IOException { bootstrap(); diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LoggedExec.java index aff3faf10e0cf..233d3eab7c0fb 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/LoggedExec.java @@ -103,9 +103,9 @@ public static ExecResult javaexec(Project project, Action action) private static 
final Pattern NEWLINE = Pattern.compile(System.lineSeparator()); - private static ExecResult genericExec( + private static ExecResult genericExec( Project project, - Function,ExecResult> function, + Function, ExecResult> function, Action action ) { if (project.getLogger().isInfoEnabled()) { diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/Version.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/Version.java index d31e15b842b2c..80dd4c622b598 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/Version.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/Version.java @@ -29,11 +29,9 @@ public enum Mode { RELAXED } - private static final Pattern pattern = - Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); + private static final Pattern pattern = Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); - private static final Pattern relaxedPattern = - Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-[a-zA-Z0-9_]+)*?"); + private static final Pattern relaxedPattern = Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-[a-zA-Z0-9_]+)*?"); public Version(int major, int minor, int revision) { Objects.requireNonNull(major, "major version can't be null"); @@ -65,16 +63,10 @@ public static Version fromString(final String s, final Mode mode) { String expected = mode == Mode.STRICT == true ? "major.minor.revision[-(alpha|beta|rc)Number][-SNAPSHOT]" : "major.minor.revision[-extra]"; - throw new IllegalArgumentException( - "Invalid version format: '" + s + "'. Should be " + expected - ); + throw new IllegalArgumentException("Invalid version format: '" + s + "'. Should be " + expected); } - return new Version( - Integer.parseInt(matcher.group(1)), - parseSuffixNumber(matcher.group(2)), - parseSuffixNumber(matcher.group(3)) - ); + return new Version(Integer.parseInt(matcher.group(1)), parseSuffixNumber(matcher.group(2)), parseSuffixNumber(matcher.group(3))); } @Override @@ -116,12 +108,14 @@ public boolean after(String compareTo) { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } Version version = (Version) o; - return major == version.major && - minor == version.minor && - revision == version.revision; + return major == version.major && minor == version.minor && revision == version.revision; } @Override diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java index e4b75c94aa5f2..4509695fb829b 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java @@ -15,7 +15,7 @@ public static String getElasticsearch() { return elasticsearch; } - public static Version getElasticsearchVersion() { + public static Version getElasticsearchVersion() { return Version.fromString(elasticsearch); } diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/info/BuildParams.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/info/BuildParams.java index 19748de1b96f1..0c00d99c120d3 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/info/BuildParams.java +++ 
b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/info/BuildParams.java @@ -124,8 +124,8 @@ private static <T> T value(T object) { String message = "Build parameter '" + propertyName(callingMethod) + "' has not been initialized. "; if (executionTime) { - message += "This property is initialized at execution time, " + - "please ensure you are not attempting to access it during project configuration."; + message += "This property is initialized at execution time, " + + "please ensure you are not attempting to access it during project configuration."; } else { message += "Perhaps the plugin responsible for initializing this property has not been applied."; } @@ -144,24 +144,22 @@ private static String propertyName(String methodName) { public static class MutableBuildParams { private static MutableBuildParams INSTANCE = new MutableBuildParams(); - private MutableBuildParams() { } + private MutableBuildParams() {} /** * Resets any existing values from previous initializations. */ public void reset() { - Arrays.stream(BuildParams.class.getDeclaredFields()) - .filter(f -> Modifier.isStatic(f.getModifiers())) - .forEach(f -> { - try { - // Since we are mutating private static fields from a public static inner class we need to suppress - // accessibility controls here. - f.setAccessible(true); - f.set(null, null); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } - }); + Arrays.stream(BuildParams.class.getDeclaredFields()).filter(f -> Modifier.isStatic(f.getModifiers())).forEach(f -> { + try { + // Since we are mutating private static fields from a public static inner class we need to suppress + // accessibility controls here. + f.setAccessible(true); + f.set(null, null); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + }); } public void setCompilerJavaHome(File compilerJavaHome) { @@ -233,8 +231,9 @@ public void setDefaultParallel(int defaultParallel) { * Indicates that a build parameter is initialized at task execution time and is not available at project configuration time. * Attempts to read an uninitialized parameter will result in an {@link IllegalStateException}. 
*/ - @Target({ElementType.METHOD, ElementType.FIELD}) + @Target({ ElementType.METHOD, ElementType.FIELD }) @Retention(RetentionPolicy.RUNTIME) @Documented - public @interface ExecutionTime {} + public @interface ExecutionTime { + } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java index c1c8a8b5e52e8..c69a23021f535 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java @@ -42,17 +42,13 @@ public class BuildPluginIT extends GradleIntegrationTestCase { public TemporaryFolder tmpDir = new TemporaryFolder(); public void testPluginCanBeApplied() { - BuildResult result = getGradleRunner("elasticsearch.build") - .withArguments("hello", "-s") - .build(); + BuildResult result = getGradleRunner("elasticsearch.build").withArguments("hello", "-s").build(); assertTaskSuccessful(result, ":hello"); assertOutputContains("build plugin can be applied"); } public void testCheckTask() { - BuildResult result = getGradleRunner("elasticsearch.build") - .withArguments("check", "assemble", "-s") - .build(); + BuildResult result = getGradleRunner("elasticsearch.build").withArguments("check", "assemble", "-s").build(); assertTaskSuccessful(result, ":check"); } @@ -64,9 +60,10 @@ public void testInsecureMavenRepository() throws IOException { "repositories {", " maven {", " name \"elastic-maven\"", - " url \"" + url + "\"\n", + " url \"" + url + "\"\n", " }", - "}"); + "}" + ); runInsecureArtifactRepositoryTest(name, url, lines); } @@ -78,17 +75,17 @@ public void testInsecureIvyRepository() throws IOException { "repositories {", " ivy {", " name \"elastic-ivy\"", - " url \"" + url + "\"\n", + " url \"" + url + "\"\n", " }", - "}"); + "}" + ); runInsecureArtifactRepositoryTest(name, url, lines); } private void runInsecureArtifactRepositoryTest(final String name, final String url, final List lines) throws IOException { final File projectDir = getProjectDir("elasticsearch.build"); FileUtils.copyDirectory(projectDir, tmpDir.getRoot(), pathname -> pathname.getPath().contains("/build/") == false); - final List buildGradleLines = - Files.readAllLines(tmpDir.getRoot().toPath().resolve("build.gradle"), StandardCharsets.UTF_8); + final List buildGradleLines = Files.readAllLines(tmpDir.getRoot().toPath().resolve("build.gradle"), StandardCharsets.UTF_8); buildGradleLines.addAll(lines); Files.write(tmpDir.getRoot().toPath().resolve("build.gradle"), buildGradleLines, StandardCharsets.UTF_8); final BuildResult result = GradleRunner.create() @@ -98,34 +95,27 @@ private void runInsecureArtifactRepositoryTest(final String name, final String u .buildAndFail(); assertOutputContains( result.getOutput(), - "repository [" + name + "] on project with path [:] is not using a secure protocol for artifacts on [" + url + "]"); + "repository [" + name + "] on project with path [:] is not using a secure protocol for artifacts on [" + url + "]" + ); } public void testLicenseAndNotice() throws IOException { - BuildResult result = getGradleRunner("elasticsearch.build") - .withArguments("clean", "assemble") - .build(); + BuildResult result = getGradleRunner("elasticsearch.build").withArguments("clean", "assemble").build(); assertTaskSuccessful(result, ":assemble"); assertBuildFileExists(result, "elasticsearch.build", "distributions/elasticsearch.build.jar"); - try (ZipFile zipFile = new ZipFile(new File( - getBuildDir("elasticsearch.build"), 
"distributions/elasticsearch.build.jar" - ))) { + try (ZipFile zipFile = new ZipFile(new File(getBuildDir("elasticsearch.build"), "distributions/elasticsearch.build.jar"))) { ZipEntry licenseEntry = zipFile.getEntry("META-INF/LICENSE.txt"); ZipEntry noticeEntry = zipFile.getEntry("META-INF/NOTICE.txt"); assertNotNull("Jar does not have META-INF/LICENSE.txt", licenseEntry); assertNotNull("Jar does not have META-INF/NOTICE.txt", noticeEntry); - try ( - InputStream license = zipFile.getInputStream(licenseEntry); - InputStream notice = zipFile.getInputStream(noticeEntry) - ) { + try (InputStream license = zipFile.getInputStream(licenseEntry); InputStream notice = zipFile.getInputStream(noticeEntry)) { assertEquals("this is a test license file", IOUtils.toString(license, StandardCharsets.UTF_8.name())); assertEquals("this is a test notice file", IOUtils.toString(notice, StandardCharsets.UTF_8.name())); } } } - } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginTests.java index 8d6d0be00dbe8..4389af00c0304 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginTests.java @@ -25,7 +25,6 @@ import java.net.URI; import java.net.URISyntaxException; - public class BuildPluginTests extends GradleUnitTestCase { @Test(expected = GradleException.class) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BwcVersionsTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BwcVersionsTests.java index 27e84fcf85404..dd6ee426ee78b 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BwcVersionsTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BwcVersionsTests.java @@ -42,48 +42,252 @@ public class BwcVersionsTests extends GradleUnitTestCase { static { // unreleased major and two unreleased minors ( minor in feature freeze ) - sampleVersions.put("8.0.0", asList( - "7_0_0", "7_0_1", "7_1_0", "7_1_1", "7_2_0", "7_3_0", "8.0.0" - )); - sampleVersions.put("7.0.0-alpha1", asList( - "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", - "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", - "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", - "6_3_0", "6_3_1", "6_3_2", - "6_4_0", "6_4_1", "6_4_2", - "6_5_0", "7_0_0_alpha1" - )); - sampleVersions.put("6.5.0", asList( - "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", - "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", - "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", - "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", - "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", - "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", - "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0" - )); - sampleVersions.put("6.6.0", asList( - "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", - "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", - "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", - "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", 
"5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", - "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", - "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", - "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0", "6_6_0" - )); - sampleVersions.put("6.4.2", asList( - "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", - "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", - "5_3_1", "5_3_2", "5_3_3", "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", - "5_6_0", "5_6_1", "5_6_2", "5_6_3", "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", - "5_6_11", "5_6_12", "5_6_13", - "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", - "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", - "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2" - )); - sampleVersions.put("7.1.0", asList( - "7_1_0", "7_0_0", "6_7_0", "6_6_1", "6_6_0" - )); + sampleVersions.put("8.0.0", asList("7_0_0", "7_0_1", "7_1_0", "7_1_1", "7_2_0", "7_3_0", "8.0.0")); + sampleVersions.put( + "7.0.0-alpha1", + asList( + "6_0_0_alpha1", + "6_0_0_alpha2", + "6_0_0_beta1", + "6_0_0_beta2", + "6_0_0_rc1", + "6_0_0_rc2", + "6_0_0", + "6_0_1", + "6_1_0", + "6_1_1", + "6_1_2", + "6_1_3", + "6_1_4", + "6_2_0", + "6_2_1", + "6_2_2", + "6_2_3", + "6_2_4", + "6_3_0", + "6_3_1", + "6_3_2", + "6_4_0", + "6_4_1", + "6_4_2", + "6_5_0", + "7_0_0_alpha1" + ) + ); + sampleVersions.put( + "6.5.0", + asList( + "5_0_0_alpha1", + "5_0_0_alpha2", + "5_0_0_alpha3", + "5_0_0_alpha4", + "5_0_0_alpha5", + "5_0_0_beta1", + "5_0_0_rc1", + "5_0_0", + "5_0_1", + "5_0_2", + "5_1_1", + "5_1_2", + "5_2_0", + "5_2_1", + "5_2_2", + "5_3_0", + "5_3_1", + "5_3_2", + "5_3_3", + "5_4_0", + "5_4_1", + "5_4_2", + "5_4_3", + "5_5_0", + "5_5_1", + "5_5_2", + "5_5_3", + "5_6_0", + "5_6_1", + "5_6_2", + "5_6_3", + "5_6_4", + "5_6_5", + "5_6_6", + "5_6_7", + "5_6_8", + "5_6_9", + "5_6_10", + "5_6_11", + "5_6_12", + "5_6_13", + "6_0_0_alpha1", + "6_0_0_alpha2", + "6_0_0_beta1", + "6_0_0_beta2", + "6_0_0_rc1", + "6_0_0_rc2", + "6_0_0", + "6_0_1", + "6_1_0", + "6_1_1", + "6_1_2", + "6_1_3", + "6_1_4", + "6_2_0", + "6_2_1", + "6_2_2", + "6_2_3", + "6_2_4", + "6_3_0", + "6_3_1", + "6_3_2", + "6_4_0", + "6_4_1", + "6_4_2", + "6_5_0" + ) + ); + sampleVersions.put( + "6.6.0", + asList( + "5_0_0_alpha1", + "5_0_0_alpha2", + "5_0_0_alpha3", + "5_0_0_alpha4", + "5_0_0_alpha5", + "5_0_0_beta1", + "5_0_0_rc1", + "5_0_0", + "5_0_1", + "5_0_2", + "5_1_1", + "5_1_2", + "5_2_0", + "5_2_1", + "5_2_2", + "5_3_0", + "5_3_1", + "5_3_2", + "5_3_3", + "5_4_0", + "5_4_1", + "5_4_2", + "5_4_3", + "5_5_0", + "5_5_1", + "5_5_2", + "5_5_3", + "5_6_0", + "5_6_1", + "5_6_2", + "5_6_3", + "5_6_4", + "5_6_5", + "5_6_6", + "5_6_7", + "5_6_8", + "5_6_9", + "5_6_10", + "5_6_11", + "5_6_12", + "5_6_13", + "6_0_0_alpha1", + "6_0_0_alpha2", + "6_0_0_beta1", + "6_0_0_beta2", + "6_0_0_rc1", + "6_0_0_rc2", + "6_0_0", + "6_0_1", + "6_1_0", + "6_1_1", + "6_1_2", + "6_1_3", + "6_1_4", + "6_2_0", + "6_2_1", + "6_2_2", + "6_2_3", + "6_2_4", + "6_3_0", + "6_3_1", + "6_3_2", + "6_4_0", + "6_4_1", + "6_4_2", + "6_5_0", + "6_6_0" + ) + ); + sampleVersions.put( + "6.4.2", + asList( + "5_0_0_alpha1", + "5_0_0_alpha2", + "5_0_0_alpha3", + "5_0_0_alpha4", + "5_0_0_alpha5", + "5_0_0_beta1", + "5_0_0_rc1", + "5_0_0", + "5_0_1", + 
"5_0_2", + "5_1_1", + "5_1_2", + "5_2_0", + "5_2_1", + "5_2_2", + "5_3_0", + "5_3_1", + "5_3_2", + "5_3_3", + "5_4_0", + "5_4_1", + "5_4_2", + "5_4_3", + "5_5_0", + "5_5_1", + "5_5_2", + "5_5_3", + "5_6_0", + "5_6_1", + "5_6_2", + "5_6_3", + "5_6_4", + "5_6_5", + "5_6_6", + "5_6_7", + "5_6_8", + "5_6_9", + "5_6_10", + "5_6_11", + "5_6_12", + "5_6_13", + "6_0_0_alpha1", + "6_0_0_alpha2", + "6_0_0_beta1", + "6_0_0_beta2", + "6_0_0_rc1", + "6_0_0_rc2", + "6_0_0", + "6_0_1", + "6_1_0", + "6_1_1", + "6_1_2", + "6_1_3", + "6_1_4", + "6_2_0", + "6_2_1", + "6_2_2", + "6_2_3", + "6_2_4", + "6_3_0", + "6_3_1", + "6_3_2", + "6_4_0", + "6_4_1", + "6_4_2" + ) + ); + sampleVersions.put("7.1.0", asList("7_1_0", "7_0_0", "6_7_0", "6_6_1", "6_6_0")); } @Test(expected = IllegalArgumentException.class) @@ -99,131 +303,347 @@ public void testExceptionOnNonCurrent() { @Test(expected = IllegalStateException.class) public void testExceptionOnTooManyMajors() { new BwcVersions( - asList( - formatVersionToLine("5.6.12"), - formatVersionToLine("6.5.0"), - formatVersionToLine("7.0.0") - ), + asList(formatVersionToLine("5.6.12"), formatVersionToLine("6.5.0"), formatVersionToLine("7.0.0")), Version.fromString("6.5.0") ); } public void testWireCompatible() { - assertVersionsEquals( - asList("6.5.0", "7.0.0"), - getVersionCollection("7.0.0-alpha1").getWireCompatible() - ); + assertVersionsEquals(asList("6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getWireCompatible()); assertVersionsEquals( asList( - "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", - "5.6.11", "5.6.12", "5.6.13", - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", - "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", - "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2", "6.5.0" + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", + "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2", + "6.5.0" ), getVersionCollection("6.5.0").getWireCompatible() ); assertVersionsEquals( asList( - "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", - "5.6.11", "5.6.12", "5.6.13", "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", - "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2" + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", + "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2" ), getVersionCollection("6.4.2").getWireCompatible() ); assertVersionsEquals( asList( - "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", - "5.6.11", "5.6.12", "5.6.13", - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", - "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", - "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2", "6.5.0", "6.6.0" + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", + "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + 
"6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2", + "6.5.0", + "6.6.0" ), getVersionCollection("6.6.0").getWireCompatible() ); - assertVersionsEquals( - asList("7.3.0", "8.0.0"), - getVersionCollection("8.0.0").getWireCompatible() - ); - assertVersionsEquals( - asList("6.7.0", "7.0.0", "7.1.0"), - getVersionCollection("7.1.0").getWireCompatible() - ); + assertVersionsEquals(asList("7.3.0", "8.0.0"), getVersionCollection("8.0.0").getWireCompatible()); + assertVersionsEquals(asList("6.7.0", "7.0.0", "7.1.0"), getVersionCollection("7.1.0").getWireCompatible()); } public void testWireCompatibleUnreleased() { - assertVersionsEquals( - asList("6.5.0", "7.0.0"), - getVersionCollection("7.0.0-alpha1").getUnreleasedWireCompatible() - ); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0"), - getVersionCollection("6.5.0").getUnreleasedWireCompatible() - ); + assertVersionsEquals(asList("6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getUnreleasedWireCompatible()); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleasedWireCompatible()); - assertVersionsEquals( - asList("5.6.13", "6.4.2"), - getVersionCollection("6.4.2").getUnreleasedWireCompatible() - ); + assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleasedWireCompatible()); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), - getVersionCollection("6.6.0").getUnreleasedWireCompatible() - ); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleasedWireCompatible()); - assertVersionsEquals( - asList("7.3.0", "8.0.0"), - getVersionCollection("8.0.0").getUnreleasedWireCompatible() - ); - assertVersionsEquals( - asList("6.7.0", "7.0.0", "7.1.0"), - getVersionCollection("7.1.0").getWireCompatible() - ); + assertVersionsEquals(asList("7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleasedWireCompatible()); + assertVersionsEquals(asList("6.7.0", "7.0.0", "7.1.0"), getVersionCollection("7.1.0").getWireCompatible()); } public void testIndexCompatible() { assertVersionsEquals( asList( - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", - "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", - "6.3.2", "6.4.0", "6.4.1", "6.4.2", "6.5.0", "7.0.0" + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2", + "6.5.0", + "7.0.0" ), getVersionCollection("7.0.0-alpha1").getIndexCompatible() ); assertVersionsEquals( asList( - "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", - "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", - "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13", - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", - "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2", "6.5.0" + "5.0.0", + "5.0.1", + "5.0.2", + "5.1.1", + "5.1.2", + "5.2.0", + "5.2.1", + "5.2.2", + "5.3.0", + "5.3.1", + "5.3.2", + "5.3.3", + "5.4.0", + "5.4.1", + "5.4.2", + "5.4.3", + "5.5.0", + "5.5.1", + "5.5.2", + "5.5.3", + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", 
+ "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2", + "6.5.0" ), getVersionCollection("6.5.0").getIndexCompatible() ); assertVersionsEquals( asList( - "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", - "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", - "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13", - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", - "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2" + "5.0.0", + "5.0.1", + "5.0.2", + "5.1.1", + "5.1.2", + "5.2.0", + "5.2.1", + "5.2.2", + "5.3.0", + "5.3.1", + "5.3.2", + "5.3.3", + "5.4.0", + "5.4.1", + "5.4.2", + "5.4.3", + "5.5.0", + "5.5.1", + "5.5.2", + "5.5.3", + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", + "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2" ), getVersionCollection("6.4.2").getIndexCompatible() ); assertVersionsEquals( asList( - "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", - "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", - "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13", - "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", - "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2", "6.5.0", "6.6.0" + "5.0.0", + "5.0.1", + "5.0.2", + "5.1.1", + "5.1.2", + "5.2.0", + "5.2.1", + "5.2.2", + "5.3.0", + "5.3.1", + "5.3.2", + "5.3.3", + "5.4.0", + "5.4.1", + "5.4.2", + "5.4.3", + "5.5.0", + "5.5.1", + "5.5.2", + "5.5.3", + "5.6.0", + "5.6.1", + "5.6.2", + "5.6.3", + "5.6.4", + "5.6.5", + "5.6.6", + "5.6.7", + "5.6.8", + "5.6.9", + "5.6.10", + "5.6.11", + "5.6.12", + "5.6.13", + "6.0.0", + "6.0.1", + "6.1.0", + "6.1.1", + "6.1.2", + "6.1.3", + "6.1.4", + "6.2.0", + "6.2.1", + "6.2.2", + "6.2.3", + "6.2.4", + "6.3.0", + "6.3.1", + "6.3.2", + "6.4.0", + "6.4.1", + "6.4.2", + "6.5.0", + "6.6.0" ), getVersionCollection("6.6.0").getIndexCompatible() ); @@ -235,76 +655,31 @@ public void testIndexCompatible() { } public void testIndexCompatibleUnreleased() { - assertVersionsEquals( - asList("6.4.2", "6.5.0", "7.0.0"), - getVersionCollection("7.0.0-alpha1").getUnreleasedIndexCompatible() - ); + assertVersionsEquals(asList("6.4.2", "6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getUnreleasedIndexCompatible()); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0"), - getVersionCollection("6.5.0").getUnreleasedIndexCompatible() - ); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleasedIndexCompatible()); - assertVersionsEquals( - asList("5.6.13", "6.4.2"), - getVersionCollection("6.4.2").getUnreleasedIndexCompatible() - ); + assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleasedIndexCompatible()); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), - 
getVersionCollection("6.6.0").getUnreleasedIndexCompatible() - ); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleasedIndexCompatible()); - assertVersionsEquals( - asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), - getVersionCollection("8.0.0").getUnreleasedIndexCompatible() - ); + assertVersionsEquals(asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleasedIndexCompatible()); } public void testGetUnreleased() { - assertVersionsEquals( - asList("6.4.2", "6.5.0", "7.0.0-alpha1"), - getVersionCollection("7.0.0-alpha1").getUnreleased() - ); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0"), - getVersionCollection("6.5.0").getUnreleased() - ); - assertVersionsEquals( - asList("5.6.13", "6.4.2"), - getVersionCollection("6.4.2").getUnreleased() - ); - assertVersionsEquals( - asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), - getVersionCollection("6.6.0").getUnreleased() - ); - assertVersionsEquals( - asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), - getVersionCollection("8.0.0").getUnreleased() - ); + assertVersionsEquals(asList("6.4.2", "6.5.0", "7.0.0-alpha1"), getVersionCollection("7.0.0-alpha1").getUnreleased()); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleased()); + assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleased()); + assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleased()); + assertVersionsEquals(asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleased()); } public void testGetBranch() { - assertUnreleasedBranchNames( - asList("6.4", "6.x"), - getVersionCollection("7.0.0-alpha1") - ); - assertUnreleasedBranchNames( - asList("5.6", "6.4"), - getVersionCollection("6.5.0") - ); - assertUnreleasedBranchNames( - singletonList("5.6"), - getVersionCollection("6.4.2") - ); - assertUnreleasedBranchNames( - asList("5.6", "6.4", "6.5"), - getVersionCollection("6.6.0") - ); - assertUnreleasedBranchNames( - asList("7.1", "7.2", "7.x"), - getVersionCollection("8.0.0") - ); + assertUnreleasedBranchNames(asList("6.4", "6.x"), getVersionCollection("7.0.0-alpha1")); + assertUnreleasedBranchNames(asList("5.6", "6.4"), getVersionCollection("6.5.0")); + assertUnreleasedBranchNames(singletonList("5.6"), getVersionCollection("6.4.2")); + assertUnreleasedBranchNames(asList("5.6", "6.4", "6.5"), getVersionCollection("6.6.0")); + assertUnreleasedBranchNames(asList("7.1", "7.2", "7.x"), getVersionCollection("8.0.0")); } public void testGetGradleProjectPath() { @@ -316,10 +691,7 @@ public void testGetGradleProjectPath() { asList(":distribution:bwc:maintenance", ":distribution:bwc:bugfix"), getVersionCollection("6.5.0") ); - assertUnreleasedGradleProjectPaths( - singletonList(":distribution:bwc:maintenance"), - getVersionCollection("6.4.2") - ); + assertUnreleasedGradleProjectPaths(singletonList(":distribution:bwc:maintenance"), getVersionCollection("6.4.2")); assertUnreleasedGradleProjectPaths( asList(":distribution:bwc:maintenance", ":distribution:bwc:bugfix", ":distribution:bwc:minor"), getVersionCollection("6.6.0") @@ -337,13 +709,11 @@ public void testGetGradleProjectPath() { public void testCompareToAuthoritative() { List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0") - .map(Version::fromString) - .collect(Collectors.toList()); + 
.map(Version::fromString) + .collect(Collectors.toList()); BwcVersions vc = new BwcVersions( - listOfVersions.stream() - .map(this::formatVersionToLine) - .collect(Collectors.toList()), + listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), Version.fromString("8.0.0") ); vc.compareToAuthoritative(authoritativeReleasedVersions); @@ -352,13 +722,11 @@ public void testCompareToAuthoritative() { public void testCompareToAuthoritativeUnreleasedActuallyReleased() { List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0", "7.1.1", "8.0.0") - .map(Version::fromString) - .collect(Collectors.toList()); + .map(Version::fromString) + .collect(Collectors.toList()); BwcVersions vc = new BwcVersions( - listOfVersions.stream() - .map(this::formatVersionToLine) - .collect(Collectors.toList()), + listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), Version.fromString("8.0.0") ); expectedEx.expect(IllegalStateException.class); @@ -368,13 +736,9 @@ public void testCompareToAuthoritativeUnreleasedActuallyReleased() { public void testCompareToAuthoritativeNotReallyRelesed() { List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); - List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1") - .map(Version::fromString) - .collect(Collectors.toList()); + List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1").map(Version::fromString).collect(Collectors.toList()); BwcVersions vc = new BwcVersions( - listOfVersions.stream() - .map(this::formatVersionToLine) - .collect(Collectors.toList()), + listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), Version.fromString("8.0.0") ); expectedEx.expect(IllegalStateException.class); @@ -384,17 +748,13 @@ public void testCompareToAuthoritativeNotReallyRelesed() { private void assertUnreleasedGradleProjectPaths(List expectedNAmes, BwcVersions bwcVersions) { List actualNames = new ArrayList<>(); - bwcVersions.forPreviousUnreleased(unreleasedVersion -> - actualNames.add(unreleasedVersion.gradleProjectPath) - ); + bwcVersions.forPreviousUnreleased(unreleasedVersion -> actualNames.add(unreleasedVersion.gradleProjectPath)); assertEquals(expectedNAmes, actualNames); } private void assertUnreleasedBranchNames(List expectedBranches, BwcVersions bwcVersions) { List actualBranches = new ArrayList<>(); - bwcVersions.forPreviousUnreleased(unreleasedVersionInfo -> - actualBranches.add(unreleasedVersionInfo.branch) - ); + bwcVersions.forPreviousUnreleased(unreleasedVersionInfo -> actualBranches.add(unreleasedVersionInfo.branch)); assertEquals(expectedBranches, actualBranches); } @@ -403,19 +763,12 @@ private String formatVersionToLine(final String version) { } private void assertVersionsEquals(List expected, List actual) { - assertEquals( - expected.stream() - .map(Version::fromString) - .collect(Collectors.toList()), - actual - ); + assertEquals(expected.stream().map(Version::fromString).collect(Collectors.toList()), actual); } private BwcVersions getVersionCollection(String currentVersion) { return new BwcVersions( - sampleVersions.get(currentVersion).stream() - .map(this::formatVersionToLine) - .collect(Collectors.toList()), + sampleVersions.get(currentVersion).stream().map(this::formatVersionToLine).collect(Collectors.toList()), Version.fromString(currentVersion) ); } diff --git 
a/buildSrc/src/test/java/org/elasticsearch/gradle/ConcatFilesTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ConcatFilesTaskTests.java index 72e128b72efd7..18ee983dd7a8b 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ConcatFilesTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ConcatFilesTaskTests.java @@ -66,17 +66,14 @@ public void testConcatenationWithUnique() throws IOException { file2.getParentFile().mkdirs(); file1.createNewFile(); file2.createNewFile(); - Files.write(file1.toPath(), ("Hello" + System.lineSeparator() + "Hello").getBytes(StandardCharsets.UTF_8)); + Files.write(file1.toPath(), ("Hello" + System.lineSeparator() + "Hello").getBytes(StandardCharsets.UTF_8)); Files.write(file2.toPath(), ("Hello" + System.lineSeparator() + "नमस्ते").getBytes(StandardCharsets.UTF_8)); concatFilesTask.setFiles(project.fileTree(file1.getParentFile().getParentFile())); concatFilesTask.concatFiles(); - assertEquals( - Arrays.asList("Hello", "नमस्ते"), - Files.readAllLines(concatFilesTask.getTarget().toPath(), StandardCharsets.UTF_8) - ); + assertEquals(Arrays.asList("Hello", "नमस्ते"), Files.readAllLines(concatFilesTask.getTarget().toPath(), StandardCharsets.UTF_8)); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginIT.java index fe2ba08853170..eb6fdd889455a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginIT.java @@ -44,49 +44,100 @@ public class DistributionDownloadPluginIT extends GradleIntegrationTestCase { public void testCurrent() throws Exception { String projectName = ":distribution:archives:linux-tar"; - assertExtractedDistro(VersionProperties.getElasticsearch(), "archive", "linux", null, null, - "tests.local_distro.config", "default", - "tests.local_distro.project", projectName); + assertExtractedDistro( + VersionProperties.getElasticsearch(), + "archive", + "linux", + null, + null, + "tests.local_distro.config", + "default", + "tests.local_distro.project", + projectName + ); } public void testCurrentExternal() throws Exception { - checkService(VersionProperties.getElasticsearch(), "archive", "linux", null, null, + checkService( + VersionProperties.getElasticsearch(), + "archive", + "linux", + null, + null, "/downloads/elasticsearch/elasticsearch-" + VersionProperties.getElasticsearch() + "-linux-x86_64.tar.gz", - "tests.internal", "false"); + "tests.internal", + "false" + ); } public void testBwc() throws Exception { - assertExtractedDistro("8.1.0", "archive", "linux", null, null, - "tests.local_distro.config", "linux-tar", - "tests.local_distro.project", ":distribution:bwc:minor", - "tests.current_version", "8.0.0"); + assertExtractedDistro( + "8.1.0", + "archive", + "linux", + null, + null, + "tests.local_distro.config", + "linux-tar", + "tests.local_distro.project", + ":distribution:bwc:minor", + "tests.current_version", + "8.0.0" + ); } public void testBwcExternal() throws Exception { - checkService("8.1.0-SNAPSHOT", "archive", "linux", null, null, + checkService( + "8.1.0-SNAPSHOT", + "archive", + "linux", + null, + null, "/downloads/elasticsearch/elasticsearch-8.1.0-SNAPSHOT-linux-x86_64.tar.gz", - "tests.internal", "false", - "tests.current_version", "9.0.0"); + "tests.internal", + "false", + "tests.current_version", + "9.0.0" + ); } public void testReleased() throws Exception { - 
checkService("7.0.0", "archive", "windows", null, null, - "/downloads/elasticsearch/elasticsearch-7.0.0-windows-x86_64.zip"); - checkService("6.5.0", "archive", "windows", null, null, - "/downloads/elasticsearch/elasticsearch-6.5.0.zip"); + checkService("7.0.0", "archive", "windows", null, null, "/downloads/elasticsearch/elasticsearch-7.0.0-windows-x86_64.zip"); + checkService("6.5.0", "archive", "windows", null, null, "/downloads/elasticsearch/elasticsearch-6.5.0.zip"); } public void testReleasedExternal() throws Exception { - checkService("7.0.0", "archive", "windows", null, null, + checkService( + "7.0.0", + "archive", + "windows", + null, + null, "/downloads/elasticsearch/elasticsearch-7.0.0-windows-x86_64.zip", - "tests.internal", "false"); - checkService("6.5.0", "archive", "windows", null, null, + "tests.internal", + "false" + ); + checkService( + "6.5.0", + "archive", + "windows", + null, + null, "/downloads/elasticsearch/elasticsearch-6.5.0.zip", - "tests.internal", "false"); + "tests.internal", + "false" + ); } - private void checkService(String version, String type, String platform, String flavor, Boolean bundledJdk, - String urlPath, String... sysProps) throws IOException { + private void checkService( + String version, + String type, + String platform, + String flavor, + Boolean bundledJdk, + String urlPath, + String... sysProps + ) throws IOException { String suffix = urlPath.endsWith("zip") ? "zip" : "tar.gz"; String sourceFile = "src/testKit/distribution-download/distribution/files/fake_elasticsearch." + suffix; WireMockServer wireMock = new WireMockServer(0); @@ -113,16 +164,16 @@ private void checkService(String version, String type, String platform, String f } } - private void assertFileDistro(String version, String type, String platform, String flavor, Boolean bundledJdk, - String... sysProps) throws IOException { + private void assertFileDistro(String version, String type, String platform, String flavor, Boolean bundledJdk, String... sysProps) + throws IOException { List finalSysProps = new ArrayList<>(); addDistroSysProps(finalSysProps, version, type, platform, flavor, bundledJdk); finalSysProps.addAll(Arrays.asList(sysProps)); runBuild(":subproj:assertDistroFile", finalSysProps.toArray(new String[0])); } - private void assertExtractedDistro(String version, String type, String platform, String flavor, Boolean bundledJdk, - String... sysProps) throws IOException { + private void assertExtractedDistro(String version, String type, String platform, String flavor, Boolean bundledJdk, String... 
sysProps) + throws IOException { List finalSysProps = new ArrayList<>(); addDistroSysProps(finalSysProps, version, type, platform, flavor, bundledJdk); finalSysProps.addAll(Arrays.asList(sysProps)); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java index 00c9bf20a56d3..a2e7413c5d717 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java @@ -45,64 +45,138 @@ public class DistributionDownloadPluginTests extends GradleUnitTestCase { private static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0"); private static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1"); private static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1"); - private static final BwcVersions BWC_MINOR = - new BwcVersions(new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), BWC_MAJOR_VERSION); - private static final BwcVersions BWC_STAGED = - new BwcVersions(new TreeSet<>(Arrays.asList(BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), BWC_MAJOR_VERSION); - private static final BwcVersions BWC_BUGFIX = - new BwcVersions(new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), BWC_MAJOR_VERSION); - private static final BwcVersions BWC_MAINTENANCE = - new BwcVersions(new TreeSet<>(Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION)), BWC_MINOR_VERSION); + private static final BwcVersions BWC_MINOR = new BwcVersions( + new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), + BWC_MAJOR_VERSION + ); + private static final BwcVersions BWC_STAGED = new BwcVersions( + new TreeSet<>(Arrays.asList(BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), + BWC_MAJOR_VERSION + ); + private static final BwcVersions BWC_BUGFIX = new BwcVersions( + new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), + BWC_MAJOR_VERSION + ); + private static final BwcVersions BWC_MAINTENANCE = new BwcVersions( + new TreeSet<>(Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION)), + BWC_MINOR_VERSION + ); public void testVersionDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), - "testdistro", null, Type.ARCHIVE, Platform.LINUX, Flavor.OSS, true); + ElasticsearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + null, + Type.ARCHIVE, + Platform.LINUX, + Flavor.OSS, + true + ); assertEquals(distro.getVersion(), VersionProperties.getElasticsearch()); } public void testBadVersionFormat() { - assertDistroError(createProject(null, false), "testdistro", "badversion", Type.ARCHIVE, Platform.LINUX, Flavor.OSS, true, - "Invalid version format: 'badversion'"); + assertDistroError( + createProject(null, false), + "testdistro", + "badversion", + Type.ARCHIVE, + Platform.LINUX, + Flavor.OSS, + true, + "Invalid version format: 'badversion'" + ); } public void testTypeDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), - "testdistro", "5.0.0", null, Platform.LINUX, Flavor.OSS, true); + ElasticsearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + null, + Platform.LINUX, + Flavor.OSS, + true + ); 
assertEquals(distro.getType(), Type.ARCHIVE); } public void testPlatformDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), - "testdistro", "5.0.0", Type.ARCHIVE, null, Flavor.OSS, true); + ElasticsearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + Type.ARCHIVE, + null, + Flavor.OSS, + true + ); assertEquals(distro.getPlatform(), ElasticsearchDistribution.CURRENT_PLATFORM); } public void testPlatformForIntegTest() { - assertDistroError(createProject(null, false), "testdistro", "5.0.0", Type.INTEG_TEST_ZIP, Platform.LINUX, null, null, - "platform not allowed for elasticsearch distribution [testdistro]"); + assertDistroError( + createProject(null, false), + "testdistro", + "5.0.0", + Type.INTEG_TEST_ZIP, + Platform.LINUX, + null, + null, + "platform not allowed for elasticsearch distribution [testdistro]" + ); } public void testFlavorDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), - "testdistro", "5.0.0", Type.ARCHIVE, Platform.LINUX, null, true); + ElasticsearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + Type.ARCHIVE, + Platform.LINUX, + null, + true + ); assertEquals(distro.getFlavor(), Flavor.DEFAULT); } public void testFlavorForIntegTest() { - assertDistroError(createProject(null, false), - "testdistro", "5.0.0", Type.INTEG_TEST_ZIP, null, Flavor.OSS, null, - "flavor [oss] not allowed for elasticsearch distribution [testdistro] of type [integ_test_zip]"); + assertDistroError( + createProject(null, false), + "testdistro", + "5.0.0", + Type.INTEG_TEST_ZIP, + null, + Flavor.OSS, + null, + "flavor [oss] not allowed for elasticsearch distribution [testdistro] of type [integ_test_zip]" + ); } public void testBundledJdkDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), - "testdistro", "5.0.0", Type.ARCHIVE, Platform.LINUX, null, true); + ElasticsearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + Type.ARCHIVE, + Platform.LINUX, + null, + true + ); assertTrue(distro.getBundledJdk()); } public void testBundledJdkForIntegTest() { - assertDistroError(createProject(null, false), "testdistro", "5.0.0", Type.INTEG_TEST_ZIP, null, null, true, - "bundledJdk not allowed for elasticsearch distribution [testdistro]"); + assertDistroError( + createProject(null, false), + "testdistro", + "5.0.0", + Type.INTEG_TEST_ZIP, + null, + null, + true, + "bundledJdk not allowed for elasticsearch distribution [testdistro]" + ); } public void testLocalCurrentVersionIntegTestZip() { @@ -110,15 +184,14 @@ public void testLocalCurrentVersionIntegTestZip() { Project archiveProject = ProjectBuilder.builder().withParent(archivesProject).withName("integ-test-zip").build(); archiveProject.getConfigurations().create("default"); archiveProject.getArtifacts().add("default", new File("doesnotmatter")); - createDistro(project, "distro", - VersionProperties.getElasticsearch(), Type.INTEG_TEST_ZIP, null, null, null); + createDistro(project, "distro", VersionProperties.getElasticsearch(), Type.INTEG_TEST_ZIP, null, null, null); checkPlugin(project); } public void testLocalCurrentVersionArchives() { for (Platform platform : Platform.values()) { for (Flavor flavor : Flavor.values()) { - for (boolean bundledJdk : new boolean[] { true, false}) { + for (boolean bundledJdk : new boolean[] { true, false }) { // create a new project in each iteration, so that we know we are resolving the 
only additional project being created Project project = createProject(BWC_MINOR, true); String projectName = projectName(platform.toString(), flavor, bundledJdk); @@ -126,8 +199,7 @@ public void testLocalCurrentVersionArchives() { Project archiveProject = ProjectBuilder.builder().withParent(archivesProject).withName(projectName).build(); archiveProject.getConfigurations().create("default"); archiveProject.getArtifacts().add("default", new File("doesnotmatter")); - createDistro(project, "distro", - VersionProperties.getElasticsearch(), Type.ARCHIVE, platform, flavor, bundledJdk); + createDistro(project, "distro", VersionProperties.getElasticsearch(), Type.ARCHIVE, platform, flavor, bundledJdk); checkPlugin(project); } } @@ -137,14 +209,13 @@ public void testLocalCurrentVersionArchives() { public void testLocalCurrentVersionPackages() { for (Type packageType : new Type[] { Type.RPM, Type.DEB }) { for (Flavor flavor : Flavor.values()) { - for (boolean bundledJdk : new boolean[] { true, false}) { + for (boolean bundledJdk : new boolean[] { true, false }) { Project project = createProject(BWC_MINOR, true); String projectName = projectName(packageType.toString(), flavor, bundledJdk); Project packageProject = ProjectBuilder.builder().withParent(packagesProject).withName(projectName).build(); packageProject.getConfigurations().create("default"); packageProject.getArtifacts().add("default", new File("doesnotmatter")); - createDistro(project, "distro", - VersionProperties.getElasticsearch(), packageType, null, flavor, bundledJdk); + createDistro(project, "distro", VersionProperties.getElasticsearch(), packageType, null, flavor, bundledJdk); checkPlugin(project); } } @@ -180,15 +251,32 @@ public void testLocalBwcPackages() { } } - private void assertDistroError(Project project, String name, String version, Type type, Platform platform, - Flavor flavor, Boolean bundledJdk, String message) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> checkDistro(project, name, version, type, platform, flavor, bundledJdk)); + private void assertDistroError( + Project project, + String name, + String version, + Type type, + Platform platform, + Flavor flavor, + Boolean bundledJdk, + String message + ) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> checkDistro(project, name, version, type, platform, flavor, bundledJdk) + ); assertThat(e.getMessage(), containsString(message)); } - private ElasticsearchDistribution createDistro(Project project, String name, String version, Type type, - Platform platform, Flavor flavor, Boolean bundledJdk) { + private ElasticsearchDistribution createDistro( + Project project, + String name, + String version, + Type type, + Platform platform, + Flavor flavor, + Boolean bundledJdk + ) { NamedDomainObjectContainer distros = DistributionDownloadPlugin.getContainer(project); return distros.create(name, distro -> { if (version != null) { @@ -210,8 +298,15 @@ private ElasticsearchDistribution createDistro(Project project, String name, Str } // create a distro and finalize its configuration - private ElasticsearchDistribution checkDistro(Project project, String name, String version, Type type, - Platform platform, Flavor flavor, Boolean bundledJdk) { + private ElasticsearchDistribution checkDistro( + Project project, + String name, + String version, + Type type, + Platform platform, + Flavor flavor, + Boolean bundledJdk + ) { ElasticsearchDistribution distribution = createDistro(project, name, version, type, platform, 
flavor, bundledJdk); distribution.finalizeValues(); return distribution; @@ -223,8 +318,16 @@ private void checkPlugin(Project project) { plugin.setupDistributions(project); } - private void checkBwc(String projectName, String config, Version version, - Type type, Platform platform, Flavor flavor, BwcVersions bwcVersions, boolean isInternal) { + private void checkBwc( + String projectName, + String config, + Version version, + Type type, + Platform platform, + Flavor flavor, + BwcVersions bwcVersions, + boolean isInternal + ) { Project project = createProject(bwcVersions, isInternal); Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build(); archiveProject.getConfigurations().create(config); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 7968f4f57cf90..cd84920907887 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -22,35 +22,26 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; - public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase { public static final String PROJECT_NAME = "elasticsearch-build-resources"; public void testUpToDateWithSourcesConfigured() { - getGradleRunner(PROJECT_NAME) - .withArguments("clean", "-s") - .build(); + getGradleRunner(PROJECT_NAME).withArguments("clean", "-s").build(); - BuildResult result = getGradleRunner(PROJECT_NAME) - .withArguments("buildResources", "-s", "-i") - .build(); + BuildResult result = getGradleRunner(PROJECT_NAME).withArguments("buildResources", "-s", "-i").build(); assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); - result = getGradleRunner(PROJECT_NAME) - .withArguments("buildResources", "-s", "-i") - .build(); + result = getGradleRunner(PROJECT_NAME).withArguments("buildResources", "-s", "-i").build(); assertTaskUpToDate(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } public void testImplicitTaskDependencyCopy() { - BuildResult result = getGradleRunner(PROJECT_NAME) - .withArguments("clean", "sampleCopyAll", "-s", "-i") - .build(); + BuildResult result = getGradleRunner(PROJECT_NAME).withArguments("clean", "sampleCopyAll", "-s", "-i").build(); assertTaskSuccessful(result, ":buildResources"); assertTaskSuccessful(result, ":sampleCopyAll"); @@ -60,9 +51,7 @@ public void testImplicitTaskDependencyCopy() { } public void testImplicitTaskDependencyInputFileOfOther() { - BuildResult result = getGradleRunner(PROJECT_NAME) - .withArguments("clean", "sample", "-s", "-i") - .build(); + BuildResult result = getGradleRunner(PROJECT_NAME).withArguments("clean", "sample", "-s", "-i").build(); assertTaskSuccessful(result, ":sample"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); @@ -71,10 +60,7 @@ public void testImplicitTaskDependencyInputFileOfOther() { public void testIncorrectUsage() { assertOutputContains( - 
getGradleRunner(PROJECT_NAME) - .withArguments("noConfigAfterExecution", "-s", "-i") - .buildAndFail() - .getOutput(), + getGradleRunner(PROJECT_NAME).withArguments("noConfigAfterExecution", "-s", "-i").buildAndFail().getOutput(), "buildResources can't be configured after the task ran" ); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java index 4e760502e2c2e..8e76334e80df1 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java @@ -95,24 +95,30 @@ private void assertExtraction(String taskname, String platform, String javaBin, protected abstract byte[] filebytes(String platform, String extension) throws IOException; - private void runBuild( - String taskname, String platform, Consumer assertions, String vendor, String version) throws IOException { + private void runBuild(String taskname, String platform, Consumer assertions, String vendor, String version) + throws IOException { WireMockServer wireMock = new WireMockServer(0); try { String extension = platform.equals("windows") ? "zip" : "tar.gz"; boolean isOld = version.equals(oldJdkVersion()); wireMock.stubFor(head(urlEqualTo(urlPath(isOld, platform, extension))).willReturn(aResponse().withStatus(200))); - wireMock.stubFor(get(urlEqualTo(urlPath(isOld, platform, extension))) - .willReturn(aResponse().withStatus(200).withBody(filebytes(platform, extension)))); + wireMock.stubFor( + get(urlEqualTo(urlPath(isOld, platform, extension))).willReturn( + aResponse().withStatus(200).withBody(filebytes(platform, extension)) + ) + ); wireMock.start(); - GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download")) - .withArguments(taskname, + GradleRunner runner = GradleRunner.create() + .withProjectDir(getProjectDir("jdk-download")) + .withArguments( + taskname, "-Dtests.jdk_vendor=" + vendor, "-Dtests.jdk_version=" + version, "-Dtests.jdk_repo=" + wireMock.baseUrl(), - "-i") + "-i" + ) .withPluginClasspath(); BuildResult result = runner.build(); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java index b8918096340ec..ea291858913f8 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java @@ -32,7 +32,7 @@ public class JdkDownloadPluginTests extends GradleUnitTestCase { @BeforeClass public static void setupRoot() { - rootProject = ProjectBuilder.builder().build(); + rootProject = ProjectBuilder.builder().build(); } public void testMissingVendor() { @@ -46,7 +46,8 @@ public void testUnknownVendor() { "unknown", "11.0.2+33", "linux", - "unknown vendor [unknown] for jdk [testjdk], must be one of [adoptopenjdk, openjdk]"); + "unknown vendor [unknown] for jdk [testjdk], must be one of [adoptopenjdk, openjdk]" + ); } public void testMissingVersion() { @@ -62,13 +63,21 @@ public void testMissingPlatform() { } public void testUnknownPlatform() { - assertJdkError(createProject(), "testjdk", "openjdk", "11.0.2+33", "unknown", - "unknown platform [unknown] for jdk [testjdk], must be one of [darwin, linux, windows, mac]"); + assertJdkError( + createProject(), + "testjdk", + "openjdk", + "11.0.2+33", + "unknown", + "unknown platform [unknown] for jdk [testjdk], must be one of [darwin, linux, 
windows, mac]" + ); } private void assertJdkError(Project project, String name, String vendor, String version, String platform, String message) { - IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> createJdk(project, name, vendor, version, platform)); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> createJdk(project, name, vendor, version, platform) + ); assertThat(e.getMessage(), equalTo(message)); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java index ae2fb0e6215db..253490666d937 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -49,13 +49,13 @@ public void testRelaxedVersionParsing() { } public void testCompareWithStringVersions() { - assertTrue("1.10.20 is not interpreted as before 2.0.0", - Version.fromString("1.10.20").before("2.0.0") - ); - assertTrue("7.0.0-alpha1 should be equal to 7.0.0-alpha1", + assertTrue("1.10.20 is not interpreted as before 2.0.0", Version.fromString("1.10.20").before("2.0.0")); + assertTrue( + "7.0.0-alpha1 should be equal to 7.0.0-alpha1", Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) ); - assertTrue("7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", + assertTrue( + "7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT")) ); } @@ -63,21 +63,25 @@ public void testCompareWithStringVersions() { public void testCollections() { assertTrue( Arrays.asList( - Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0") - ).containsAll(Arrays.asList( - Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT") - )) + Version.fromString("5.2.0"), + Version.fromString("5.2.1-SNAPSHOT"), + Version.fromString("6.0.0"), + Version.fromString("6.0.1"), + Version.fromString("6.1.0") + ).containsAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))) ); Set versions = new HashSet<>(); - versions.addAll(Arrays.asList( - Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0") - )); + versions.addAll( + Arrays.asList( + Version.fromString("5.2.0"), + Version.fromString("5.2.1-SNAPSHOT"), + Version.fromString("6.0.0"), + Version.fromString("6.0.1"), + Version.fromString("6.1.0") + ) + ); Set subset = new HashSet<>(); - subset.addAll(Arrays.asList( - Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT") - )); + subset.addAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))); assertTrue(versions.containsAll(subset)); } @@ -86,9 +90,7 @@ public void testToString() { } public void testCompareVersions() { - assertEquals(0, - new Version(7, 0, 0).compareTo(new Version(7, 0, 0)) - ); + assertEquals(0, new Version(7, 0, 0).compareTo(new Version(7, 0, 0))); } public void testExceptionEmpty() { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/doc/RestTestFromSnippetsTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/doc/RestTestFromSnippetsTaskTests.java index 8f0f12747ffc3..a8e03cc0b0488 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/doc/RestTestFromSnippetsTaskTests.java +++ 
b/buildSrc/src/test/java/org/elasticsearch/gradle/doc/RestTestFromSnippetsTaskTests.java @@ -23,8 +23,10 @@ public void testSimpleBlockQuote() { } public void testMultipleBlockQuotes() { - assertEquals("\"foo\": \"bort baz\", \"bar\": \"other\"", - replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\", \"bar\": \"\"\"other\"\"\"")); + assertEquals( + "\"foo\": \"bort baz\", \"bar\": \"other\"", + replaceBlockQuote("\"foo\": \"\"\"bort baz\"\"\", \"bar\": \"\"\"other\"\"\"") + ); } public void testEscapingInBlockQuote() { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java index 67bae367c6f9f..991f6ff444c45 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java @@ -43,7 +43,7 @@ public void testBuildTrustStoreFromFile() throws Exception { final Certificate certificate = store.getCertificate("ca"); assertThat(certificate, notNullValue()); assertThat(certificate, instanceOf(X509Certificate.class)); - assertThat(((X509Certificate)certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); + assertThat(((X509Certificate) certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); } public void testBuildTrustStoreFromCA() throws Exception { @@ -55,6 +55,6 @@ public void testBuildTrustStoreFromCA() throws Exception { final Certificate certificate = store.getCertificate("cert-0"); assertThat(certificate, notNullValue()); assertThat(certificate, instanceOf(X509Certificate.class)); - assertThat(((X509Certificate)certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); + assertThat(((X509Certificate) certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginBuildPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginBuildPluginTests.java index b3dccfbcbc891..113821f97f5e7 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginBuildPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginBuildPluginTests.java @@ -18,38 +18,26 @@ public class PluginBuildPluginTests extends GradleUnitTestCase { @Before public void setUp() throws Exception { - project = ProjectBuilder.builder() - .withName(getClass().getName()) - .build(); + project = ProjectBuilder.builder().withName(getClass().getName()).build(); } public void testApply() { // FIXME: distribution download plugin doesn't support running externally - project.getExtensions().getExtraProperties().set( - "bwcVersions", Mockito.mock(BwcVersions.class) - ); + project.getExtensions().getExtraProperties().set("bwcVersions", Mockito.mock(BwcVersions.class)); project.getPlugins().apply(PluginBuildPlugin.class); assertNotNull( "plugin extension created with the right name", project.getExtensions().findByName(PluginBuildPlugin.PLUGIN_EXTENSION_NAME) ); - assertNotNull( - "plugin extensions has the right type", - project.getExtensions().findByType(PluginPropertiesExtension.class) - ); + assertNotNull("plugin extensions has the right type", project.getExtensions().findByType(PluginPropertiesExtension.class)); - assertNotNull( - "plugin created an integTest class", - project.getTasks().findByName("integTest") - ); + 
assertNotNull("plugin created an integTest class", project.getTasks().findByName("integTest")); } @Ignore("https://github.com/elastic/elasticsearch/issues/47123") public void testApplyWithAfterEvaluate() { - project.getExtensions().getExtraProperties().set( - "bwcVersions", Mockito.mock(BwcVersions.class) - ); + project.getExtensions().getExtraProperties().set("bwcVersions", Mockito.mock(BwcVersions.class)); project.getPlugins().apply(PluginBuildPlugin.class); PluginPropertiesExtension extension = project.getExtensions().getByType(PluginPropertiesExtension.class); extension.setNoticeFile(project.file("test.notice")); @@ -60,9 +48,7 @@ public void testApplyWithAfterEvaluate() { ((ProjectInternal) project).evaluate(); assertNotNull( - "Task to generate notice not created: " + project.getTasks().stream() - .map(Task::getPath) - .collect(Collectors.joining(", ")), + "Task to generate notice not created: " + project.getTasks().stream().map(Task::getPath).collect(Collectors.joining(", ")), project.getTasks().findByName("generateNotice") ); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginPropertiesExtensionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginPropertiesExtensionTests.java index b93d400f3e057..a64f2133001b6 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginPropertiesExtensionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/plugin/PluginPropertiesExtensionTests.java @@ -30,8 +30,9 @@ public void testCreatingPluginPropertiesExtensionWithNameAndVersion() { String projectName = "Test"; String projectVersion = "5.0"; - PluginPropertiesExtension pluginPropertiesExtension = - new PluginPropertiesExtension(this.createProject(projectName, projectVersion)); + PluginPropertiesExtension pluginPropertiesExtension = new PluginPropertiesExtension( + this.createProject(projectName, projectVersion) + ); assertEquals(projectName, pluginPropertiesExtension.getName()); assertEquals(projectVersion, pluginPropertiesExtension.getVersion()); @@ -40,8 +41,7 @@ public void testCreatingPluginPropertiesExtensionWithNameAndVersion() { public void testCreatingPluginPropertiesExtensionWithNameWithoutVersion() { String projectName = "Test"; - PluginPropertiesExtension pluginPropertiesExtension = - new PluginPropertiesExtension(this.createProject(projectName, null)); + PluginPropertiesExtension pluginPropertiesExtension = new PluginPropertiesExtension(this.createProject(projectName, null)); assertEquals(projectName, pluginPropertiesExtension.getName()); assertEquals("unspecified", pluginPropertiesExtension.getVersion()); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/DependencyLicensesTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/DependencyLicensesTaskTests.java index 5492a7cfa9668..fdf44ddaf6967 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/DependencyLicensesTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/DependencyLicensesTaskTests.java @@ -170,10 +170,7 @@ public void givenProjectWithADependencyWithWrongShaThenShouldThrowException() th File licensesDir = getLicensesDir(project); createAllDefaultDependencyFiles(licensesDir, "groovy-all"); - Path groovySha = Files - .list(licensesDir.toPath()) - .filter(file -> file.toFile().getName().contains("sha")) - .findFirst().get(); + Path groovySha = Files.list(licensesDir.toPath()).filter(file -> file.toFile().getName().contains("sha")).findFirst().get(); 
Files.write(groovySha, new byte[] { 1 }, StandardOpenOption.CREATE); @@ -247,16 +244,14 @@ private void createFileIn(File parent, String name, String content) throws IOExc } private UpdateShasTask createUpdateShasTask(Project project, TaskProvider dependencyLicensesTask) { - UpdateShasTask task = project.getTasks() - .register("updateShas", UpdateShasTask.class) - .get(); + UpdateShasTask task = project.getTasks().register("updateShas", UpdateShasTask.class).get(); task.setParentTask(dependencyLicensesTask); return task; } private TaskProvider createDependencyLicensesTask(Project project) { - TaskProvider task = project.getTasks() + TaskProvider task = project.getTasks() .register("dependencyLicenses", DependencyLicensesTask.class, new Action() { @Override public void execute(DependencyLicensesTask dependencyLicensesTask) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java index 574545c4361d6..122dae068beee 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java @@ -55,7 +55,6 @@ public void testCheckPermissionsWhenAnExecutableFileExists() throws Exception { file.delete(); } - public void testCheckPermissionsWhenNoFileExists() throws Exception { RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java index c6e1e2783cebc..1ac206b2a5d36 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java @@ -26,14 +26,18 @@ public class TestingConventionsTasksIT extends GradleIntegrationTestCase { @Before - public void setUp() { - } + public void setUp() {} public void testInnerClasses() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":no_tests_in_inner_classes:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":no_tests_in_inner_classes:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Test classes implemented by inner classes will not run:", " * org.elasticsearch.gradle.testkit.NastyInnerClasses$LooksLikeATestWithoutNamingConvention1", " * org.elasticsearch.gradle.testkit.NastyInnerClasses$LooksLikeATestWithoutNamingConvention2", @@ -44,10 +48,15 @@ public void testInnerClasses() { } public void testNamingConvention() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":incorrect_naming_conventions:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":incorrect_naming_conventions:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Seem like test classes but don't match naming convention:", " * org.elasticsearch.gradle.testkit.LooksLikeATestWithoutNamingConvention1", " * 
org.elasticsearch.gradle.testkit.LooksLikeATestWithoutNamingConvention2", @@ -57,61 +66,84 @@ public void testNamingConvention() { } public void testNoEmptyTasks() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":empty_test_task:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":empty_test_task:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Expected at least one test class included in task :empty_test_task:emptyTest, but found none.", "Expected at least one test class included in task :empty_test_task:test, but found none." ); } public void testAllTestTasksIncluded() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":all_classes_in_tasks:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":all_classes_in_tasks:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Test classes are not included in any enabled task (:all_classes_in_tasks:test):", " * org.elasticsearch.gradle.testkit.NamingConventionIT" ); } public void testTaskNotImplementBaseClass() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":not_implementing_base:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":not_implementing_base:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Tests classes with suffix `IT` should extend org.elasticsearch.gradle.testkit.Integration but the following classes do not:", - " * org.elasticsearch.gradle.testkit.NamingConventionIT", - " * org.elasticsearch.gradle.testkit.NamingConventionMissmatchIT", - "Tests classes with suffix `Tests` should extend org.elasticsearch.gradle.testkit.Unit but the following classes do not:", - " * org.elasticsearch.gradle.testkit.NamingConventionMissmatchTests", - " * org.elasticsearch.gradle.testkit.NamingConventionTests" + " * org.elasticsearch.gradle.testkit.NamingConventionIT", + " * org.elasticsearch.gradle.testkit.NamingConventionMissmatchIT", + "Tests classes with suffix `Tests` should extend org.elasticsearch.gradle.testkit.Unit but the following classes do not:", + " * org.elasticsearch.gradle.testkit.NamingConventionMissmatchTests", + " * org.elasticsearch.gradle.testkit.NamingConventionTests" ); } public void testValidSetupWithoutBaseClass() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":valid_setup_no_base:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + ":valid_setup_no_base:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.build(); assertTaskSuccessful(result, ":valid_setup_no_base:testingConventions"); } public void testValidSetupWithBaseClass() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":valid_setup_with_base:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments( + "clean", + 
":valid_setup_with_base:testingConventions", + "-i", + "-s" + ); BuildResult result = runner.build(); assertTaskSuccessful(result, ":valid_setup_with_base:testingConventions"); } public void testTestsInMain() { - GradleRunner runner = getGradleRunner("testingConventions") - .withArguments("clean", ":tests_in_main:testingConventions", "-i", "-s"); + GradleRunner runner = getGradleRunner("testingConventions").withArguments("clean", ":tests_in_main:testingConventions", "-i", "-s"); BuildResult result = runner.buildAndFail(); - assertOutputContains(result.getOutput(), - "Classes matching the test naming convention should be in test not main:", - " * NamingConventionIT", - " * NamingConventionTests" + assertOutputContains( + result.getOutput(), + "Classes matching the test naming convention should be in test not main:", + " * NamingConventionIT", + " * NamingConventionTests" ); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java index a57d6da036583..f2354e0fffc34 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTaskIT.java @@ -27,57 +27,64 @@ public class ThirdPartyAuditTaskIT extends GradleIntegrationTestCase { @Before public void setUp() throws Exception { // Build the sample jars - getGradleRunner("thirdPartyAudit") - .withArguments("build", "-s") - .build(); + getGradleRunner("thirdPartyAudit").withArguments("build", "-s").build(); } public void testElasticsearchIgnored() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "empty", "-s", - "-PcompileOnlyGroup=elasticsearch.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=elasticsearch.gradle:dummy-io", "-PcompileVersion=0.0.1" - ) - .build(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "empty", + "-s", + "-PcompileOnlyGroup=elasticsearch.gradle:broken-log4j", + "-PcompileOnlyVersion=0.0.1", + "-PcompileGroup=elasticsearch.gradle:dummy-io", + "-PcompileVersion=0.0.1" + ).build(); assertTaskNoSource(result, ":empty"); } public void testWithEmptyRules() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "empty", "-s", - "-PcompileOnlyGroup=other.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=other.gradle:dummy-io", "-PcompileVersion=0.0.1" - ) - .buildAndFail(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "empty", + "-s", + "-PcompileOnlyGroup=other.gradle:broken-log4j", + "-PcompileOnlyVersion=0.0.1", + "-PcompileGroup=other.gradle:dummy-io", + "-PcompileVersion=0.0.1" + ).buildAndFail(); } public void testViolationFoundAndCompileOnlyIgnored() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "absurd", "-s", - "-PcompileOnlyGroup=other.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=other.gradle:dummy-io", "-PcompileVersion=0.0.1" - ) - .buildAndFail(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "absurd", + "-s", + "-PcompileOnlyGroup=other.gradle:broken-log4j", + "-PcompileOnlyVersion=0.0.1", + "-PcompileGroup=other.gradle:dummy-io", + "-PcompileVersion=0.0.1" + ).buildAndFail(); assertTaskFailed(result, ":absurd"); - assertOutputContains(result.getOutput(), - "Classes with violations:", - " 
* TestingIO", - "> Audit of third party dependencies failed" - ); - assertOutputDoesNotContain(result.getOutput(),"Missing classes:"); + assertOutputContains(result.getOutput(), "Classes with violations:", " * TestingIO", "> Audit of third party dependencies failed"); + assertOutputDoesNotContain(result.getOutput(), "Missing classes:"); } public void testClassNotFoundAndCompileOnlyIgnored() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "absurd", "-s", - "-PcompileGroup=other.gradle:broken-log4j", "-PcompileVersion=0.0.1", - "-PcompileOnlyGroup=other.gradle:dummy-io", "-PcompileOnlyVersion=0.0.1" - ) - .buildAndFail(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "absurd", + "-s", + "-PcompileGroup=other.gradle:broken-log4j", + "-PcompileVersion=0.0.1", + "-PcompileOnlyGroup=other.gradle:dummy-io", + "-PcompileOnlyVersion=0.0.1" + ).buildAndFail(); assertTaskFailed(result, ":absurd"); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "Missing classes:", " * org.apache.logging.log4j.LogManager", "> Audit of third party dependencies failed" @@ -86,15 +93,19 @@ public void testClassNotFoundAndCompileOnlyIgnored() { } public void testJarHellWithJDK() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "absurd", "-s", - "-PcompileGroup=other.gradle:jarhellJdk", "-PcompileVersion=0.0.1", - "-PcompileOnlyGroup=other.gradle:dummy-io", "-PcompileOnlyVersion=0.0.1" - ) - .buildAndFail(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "absurd", + "-s", + "-PcompileGroup=other.gradle:jarhellJdk", + "-PcompileVersion=0.0.1", + "-PcompileOnlyGroup=other.gradle:dummy-io", + "-PcompileOnlyVersion=0.0.1" + ).buildAndFail(); assertTaskFailed(result, ":absurd"); - assertOutputContains(result.getOutput(), + assertOutputContains( + result.getOutput(), "> Audit of third party dependencies failed:", " Jar Hell with the JDK:", " * java.lang.String" @@ -103,12 +114,15 @@ public void testJarHellWithJDK() { } public void testElasticsearchIgnoredWithViolations() { - BuildResult result = getGradleRunner("thirdPartyAudit") - .withArguments("clean", "absurd", "-s", - "-PcompileOnlyGroup=elasticsearch.gradle:broken-log4j", "-PcompileOnlyVersion=0.0.1", - "-PcompileGroup=elasticsearch.gradle:dummy-io", "-PcompileVersion=0.0.1" - ) - .build(); + BuildResult result = getGradleRunner("thirdPartyAudit").withArguments( + "clean", + "absurd", + "-s", + "-PcompileOnlyGroup=elasticsearch.gradle:broken-log4j", + "-PcompileOnlyVersion=0.0.1", + "-PcompileGroup=elasticsearch.gradle:dummy-io", + "-PcompileVersion=0.0.1" + ).build(); assertTaskNoSource(result, ":absurd"); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/UpdateShasTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/UpdateShasTaskTests.java index 1cf523e2e62d8..363bc9c344af8 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/UpdateShasTaskTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/UpdateShasTaskTests.java @@ -45,8 +45,7 @@ public void prepare() throws IOException { } @Test - public void whenDependencyDoesntExistThenShouldDeleteDependencySha() - throws IOException, NoSuchAlgorithmException { + public void whenDependencyDoesntExistThenShouldDeleteDependencySha() throws IOException, NoSuchAlgorithmException { File unusedSha = createFileIn(getLicensesDir(project), "test.sha1", ""); 
task.updateShas(); @@ -55,23 +54,19 @@ public void whenDependencyDoesntExistThenShouldDeleteDependencySha() } @Test - public void whenDependencyExistsButShaNotThenShouldCreateNewShaFile() - throws IOException, NoSuchAlgorithmException { + public void whenDependencyExistsButShaNotThenShouldCreateNewShaFile() throws IOException, NoSuchAlgorithmException { project.getDependencies().add("compile", dependency); getLicensesDir(project).mkdir(); task.updateShas(); - Path groovySha = Files - .list(getLicensesDir(project).toPath()) - .findFirst().get(); + Path groovySha = Files.list(getLicensesDir(project).toPath()).findFirst().get(); assertTrue(groovySha.toFile().getName().startsWith("groovy-all")); } @Test - public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() - throws IOException, NoSuchAlgorithmException { + public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() throws IOException, NoSuchAlgorithmException { project.getDependencies().add("compile", dependency); File groovyJar = task.getParentTask().getDependencies().getFiles().iterator().next(); @@ -84,8 +79,7 @@ public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() } @Test - public void whenLicensesDirDoesntExistThenShouldThrowException() - throws IOException, NoSuchAlgorithmException { + public void whenLicensesDirDoesntExistThenShouldThrowException() throws IOException, NoSuchAlgorithmException { expectedException.expect(GradleException.class); expectedException.expectMessage(containsString("isn't a valid directory")); @@ -119,16 +113,14 @@ private File createFileIn(File parent, String name, String content) throws IOExc } private UpdateShasTask createUpdateShasTask(Project project) { - UpdateShasTask task = project.getTasks() - .register("updateShas", UpdateShasTask.class) - .get(); + UpdateShasTask task = project.getTasks().register("updateShas", UpdateShasTask.class).get(); task.setParentTask(createDependencyLicensesTask(project)); return task; } private TaskProvider createDependencyLicensesTask(Project project) { - TaskProvider task = project.getTasks() + TaskProvider task = project.getTasks() .register("dependencyLicenses", DependencyLicensesTask.class, new Action() { @Override public void execute(DependencyLicensesTask dependencyLicensesTask) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java index c7b59f64252e9..c5bbfc7c3d554 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java @@ -74,8 +74,8 @@ interface FileInputStreamWrapper { InputStream apply(FileInputStream fis) throws IOException; } - private void assertTar( - final String extension, final FileInputStreamWrapper wrapper, boolean preserveFileTimestamps) throws IOException { + private void assertTar(final String extension, final FileInputStreamWrapper wrapper, boolean preserveFileTimestamps) + throws IOException { try (TarArchiveInputStream tar = new TarArchiveInputStream(wrapper.apply(new FileInputStream(getOutputFile(extension))))) { TarArchiveEntry entry = tar.getNextTarEntry(); boolean realFolderEntry = false; @@ -93,27 +93,18 @@ private void assertTar( fileEntry = true; } else if (entry.getName().equals("real-folder/link-to-file")) { assertTrue(entry.isSymbolicLink()); - assertThat( - entry.getLinkName(), - anyOf(equalTo("./file"), 
equalTo(".\\file")) - ); + assertThat(entry.getLinkName(), anyOf(equalTo("./file"), equalTo(".\\file"))); linkToFileEntry = true; } else if (entry.getName().equals("link-in-folder/")) { assertTrue(entry.isDirectory()); linkInFolderEntry = true; } else if (entry.getName().equals("link-in-folder/link-to-file")) { assertTrue(entry.isSymbolicLink()); - assertThat( - entry.getLinkName(), - anyOf(equalTo("../real-folder/file"), equalTo("..\\real-folder\\file")) - ); + assertThat(entry.getLinkName(), anyOf(equalTo("../real-folder/file"), equalTo("..\\real-folder\\file"))); linkInFolderLinkToFileEntry = true; } else if (entry.getName().equals("link-to-real-folder")) { assertTrue(entry.isSymbolicLink()); - assertThat( - entry.getLinkName(), - anyOf(equalTo("./real-folder"), equalTo(".\\real-folder")) - ); + assertThat(entry.getLinkName(), anyOf(equalTo("./real-folder"), equalTo(".\\real-folder"))); linkToRealFolderEntry = true; } else { throw new GradleException("unexpected entry [" + entry.getName() + "]"); @@ -135,12 +126,14 @@ private void assertTar( } private void runBuild(final String task, final boolean preserveFileTimestamps) { - final GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir()) + final GradleRunner runner = GradleRunner.create() + .withProjectDir(getProjectDir()) .withArguments( task, "-Dtests.symbolic_link_preserving_tar_source=" + temporaryFolder.getRoot().toString(), "-Dtests.symbolic_link_preserving_tar_preserve_file_timestamps=" + preserveFileTimestamps, - "-i") + "-i" + ) .withPluginClasspath(); runner.build(); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java index 0fc26f0284c44..1f77aa803c3b3 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java @@ -27,10 +27,7 @@ import org.junit.runner.RunWith; @RunWith(RandomizedRunner.class) -@TestMethodProviders({ - JUnit4MethodProvider.class, - JUnit3MethodProvider.class -}) +@TestMethodProviders({ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) @ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die public abstract class BaseTestCase extends Assert { @@ -39,6 +36,7 @@ public abstract class BaseTestCase extends Assert { public interface ThrowingRunnable { void run() throws Throwable; } + public static T expectThrows(Class expectedType, ThrowingRunnable runnable) { try { runnable.run(); @@ -46,11 +44,12 @@ public static T expectThrows(Class expectedType, Throwi if (expectedType.isInstance(e)) { return expectedType.cast(e); } - AssertionFailedError assertion = - new AssertionFailedError("Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + e); + AssertionFailedError assertion = new AssertionFailedError( + "Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + e + ); assertion.initCause(e); throw assertion; } - throw new AssertionFailedError("Expected exception "+ expectedType.getSimpleName() + " but no exception was thrown"); + throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName() + " but no exception was thrown"); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/DistroTestPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/DistroTestPluginTests.java index f88a4c11415bc..96b6208be7205 100644 --- 
a/buildSrc/src/test/java/org/elasticsearch/gradle/test/DistroTestPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/DistroTestPluginTests.java @@ -11,23 +11,22 @@ public class DistroTestPluginTests extends GradleIntegrationTestCase { public void testParseOsReleaseOnOracle() { - final List lines = List - .of( - "NAME=\"Oracle Linux Server\"", - "VERSION=\"6.10\"", - "ID=\"ol\"", - "VERSION_ID=\"6.10\"", - "PRETTY_NAME=\"Oracle Linux Server 6.10\"", - "ANSI_COLOR=\"0;31\"", - "CPE_NAME=\"cpe:/o:oracle:linux:6:10:server\"", - "HOME_URL" + "=\"https://linux.oracle.com/\"", - "BUG_REPORT_URL=\"https://bugzilla.oracle.com/\"", - "", - "ORACLE_BUGZILLA_PRODUCT" + "=\"Oracle Linux 6\"", - "ORACLE_BUGZILLA_PRODUCT_VERSION=6.10", - "ORACLE_SUPPORT_PRODUCT=\"Oracle Linux\"", - "ORACLE_SUPPORT_PRODUCT_VERSION=6.10" - ); + final List lines = List.of( + "NAME=\"Oracle Linux Server\"", + "VERSION=\"6.10\"", + "ID=\"ol\"", + "VERSION_ID=\"6.10\"", + "PRETTY_NAME=\"Oracle Linux Server 6.10\"", + "ANSI_COLOR=\"0;31\"", + "CPE_NAME=\"cpe:/o:oracle:linux:6:10:server\"", + "HOME_URL" + "=\"https://linux.oracle.com/\"", + "BUG_REPORT_URL=\"https://bugzilla.oracle.com/\"", + "", + "ORACLE_BUGZILLA_PRODUCT" + "=\"Oracle Linux 6\"", + "ORACLE_BUGZILLA_PRODUCT_VERSION=6.10", + "ORACLE_SUPPORT_PRODUCT=\"Oracle Linux\"", + "ORACLE_SUPPORT_PRODUCT_VERSION=6.10" + ); final Map results = parseOsRelease(lines); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index 6c8795cbdab04..8f0f2e69551cf 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -26,8 +26,10 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { protected File getProjectDir(String name) { File root = new File("src/testKit/"); if (root.exists() == false) { - throw new RuntimeException("Could not find resources dir for integration tests. " + - "Note that these tests can only be ran by Gradle and are not currently supported by the IDE"); + throw new RuntimeException( + "Could not find resources dir for integration tests. " + + "Note that these tests can only be ran by Gradle and are not currently supported by the IDE" + ); } return new File(root, name).getAbsoluteFile(); } @@ -39,10 +41,7 @@ protected GradleRunner getGradleRunner(String sampleProject) { } catch (IOException e) { throw new UncheckedIOException(e); } - return GradleRunner.create() - .withProjectDir(getProjectDir(sampleProject)) - .withPluginClasspath() - .withTestKitDir(testkit); + return GradleRunner.create().withProjectDir(getProjectDir(sampleProject)).withPluginClasspath().withTestKitDir(testkit); } protected File getBuildDir(String name) { @@ -55,9 +54,12 @@ protected void assertOutputContains(String output, String... lines) { } List index = Stream.of(lines).map(line -> output.indexOf(line)).collect(Collectors.toList()); if (index.equals(index.stream().sorted().collect(Collectors.toList())) == false) { - fail("Expected the following lines to appear in this order:\n" + - Stream.of(lines).map(line -> " - `" + line + "`").collect(Collectors.joining("\n")) + - "\nTBut the order was different. 
Output is:\n\n```" + output + "\n```\n" + fail( + "Expected the following lines to appear in this order:\n" + + Stream.of(lines).map(line -> " - `" + line + "`").collect(Collectors.joining("\n")) + + "\nTBut the order was different. Output is:\n\n```" + + output + + "\n```\n" ); } } @@ -69,17 +71,11 @@ protected void assertOutputContains(String output, Set lines) { } protected void assertOutputContains(String output, String line) { - assertTrue( - "Expected the following line in output:\n\n" + line + "\n\nOutput is:\n" + output, - output.contains(line) - ); + assertTrue("Expected the following line in output:\n\n" + line + "\n\nOutput is:\n" + output, output.contains(line)); } protected void assertOutputDoesNotContain(String output, String line) { - assertFalse( - "Expected the following line not to be in output:\n\n" + line + "\n\nOutput is:\n" + output, - output.contains(line) - ); + assertFalse("Expected the following line not to be in output:\n\n" + line + "\n\nOutput is:\n" + output, output.contains(line)); } protected void assertOutputDoesNotContain(String output, String... lines) { @@ -113,12 +109,19 @@ protected void assertTaskNoSource(BuildResult result, String... taskNames) { private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { BuildTask task = result.task(taskName); if (task == null) { - fail("Expected task `" + taskName + "` to be " + taskOutcome +", but it did not run" + - "\n\nOutput is:\n" + result.getOutput()); + fail( + "Expected task `" + taskName + "` to be " + taskOutcome + ", but it did not run" + "\n\nOutput is:\n" + result.getOutput() + ); } assertEquals( - "Expected task `" + taskName +"` to be " + taskOutcome + " but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , + "Expected task `" + + taskName + + "` to be " + + taskOutcome + + " but it was: " + + task.getOutcome() + + "\n\nOutput is:\n" + + result.getOutput(), taskOutcome, task.getOutcome() ); @@ -131,8 +134,7 @@ protected void assertTaskUpToDate(BuildResult result, String... taskNames) { fail("Expected task `" + taskName + "` to be up-to-date, but it did not run"); } assertEquals( - "Expected task to be up to date but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput(), + "Expected task to be up to date but it was: " + task.getOutcome() + "\n\nOutput is:\n" + result.getOutput(), TaskOutcome.UP_TO_DATE, task.getOutcome() ); @@ -142,8 +144,7 @@ protected void assertTaskUpToDate(BuildResult result, String... 
taskNames) { protected void assertBuildFileExists(BuildResult result, String projectName, String path) { Path absPath = getBuildDir(projectName).toPath().resolve(path); assertTrue( - result.getOutput() + "\n\nExpected `" + absPath + "` to exists but it did not" + - "\n\nOutput is:\n" + result.getOutput(), + result.getOutput() + "\n\nExpected `" + absPath + "` to exists but it did not" + "\n\nOutput is:\n" + result.getOutput(), Files.exists(absPath) ); } @@ -151,8 +152,7 @@ protected void assertBuildFileExists(BuildResult result, String projectName, Str protected void assertBuildFileDoesNotExists(BuildResult result, String projectName, String path) { Path absPath = getBuildDir(projectName).toPath().resolve(path); assertFalse( - result.getOutput() + "\n\nExpected `" + absPath + "` bo to exists but it did" + - "\n\nOutput is:\n" + result.getOutput(), + result.getOutput() + "\n\nExpected `" + absPath + "` bo to exists but it did" + "\n\nOutput is:\n" + result.getOutput(), Files.exists(absPath) ); } @@ -177,12 +177,11 @@ private String getLocalTestPath(String propertyName) { public void assertOutputOnlyOnce(String output, String... text) { for (String each : text) { int i = output.indexOf(each); - if (i == -1 ) { - fail("Expected \n```" + each + "```\nto appear at most once, but it didn't at all.\n\nOutout is:\n"+ output - ); + if (i == -1) { + fail("Expected \n```" + each + "```\nto appear at most once, but it didn't at all.\n\nOutout is:\n" + output); } - if(output.indexOf(each) != output.lastIndexOf(each)) { - fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutout is:\n"+ output); + if (output.indexOf(each) != output.lastIndexOf(each)) { + fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutout is:\n" + output); } } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java index 58852230bb7e6..543469e6ff5f1 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java @@ -7,12 +7,6 @@ import org.junit.runner.RunWith; @RunWith(RandomizedRunner.class) -@TestMethodProviders({ - JUnit4MethodProvider.class, - JUnit3MethodProvider.class -}) -@ThreadLeakFilters(defaultFilters = true, filters = { - GradleThreadsFilter.class -}) -public abstract class GradleUnitTestCase extends BaseTestCase { -} +@TestMethodProviders({ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) +@ThreadLeakFilters(defaultFilters = true, filters = { GradleThreadsFilter.class }) +public abstract class GradleUnitTestCase extends BaseTestCase {} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java index 18871e16555ef..4b3384eb117c3 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java @@ -36,17 +36,18 @@ public final class JUnit3MethodProvider implements TestMethodProvider { @Override public Collection getTestMethods(Class suiteClass, ClassModel classModel) { - Map methods = classModel.getMethods(); + Map methods = classModel.getMethods(); ArrayList result = new ArrayList<>(); for (MethodModel mm : methods.values()) { // Skip any methods that have overrieds/ shadows. 
- if (mm.getDown() != null) continue; + if (mm.getDown() != null) + continue; Method m = mm.element; - if (m.getName().startsWith("test") && - Modifier.isPublic(m.getModifiers()) && - !Modifier.isStatic(m.getModifiers()) && - m.getParameterTypes().length == 0) { + if (m.getName().startsWith("test") + && Modifier.isPublic(m.getModifiers()) + && !Modifier.isStatic(m.getModifiers()) + && m.getParameterTypes().length == 0) { result.add(m); } } diff --git a/buildSrc/src/testKit/testingConventions/all_classes_in_tasks/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java b/buildSrc/src/testKit/testingConventions/all_classes_in_tasks/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 48a4f7adfd99e..a78b438abfd5a 100644 --- a/buildSrc/src/testKit/testingConventions/all_classes_in_tasks/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java +++ b/buildSrc/src/testKit/testingConventions/all_classes_in_tasks/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -20,4 +20,4 @@ public class NamingConventionIT { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention1.java b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention1.java index 35d60d8a56b53..fa5295e926b78 100644 --- a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention1.java +++ b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention1.java @@ -27,4 +27,4 @@ public void annotatedTestMethod() { } -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention3.java b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention3.java index 4a946c3aeb0ae..253559bb59ae5 100644 --- a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention3.java +++ b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeATestWithoutNamingConvention3.java @@ -18,13 +18,10 @@ */ package org.elasticsearch.gradle.testkit; -import org.junit.Assert; -import org.junit.Test; - public class LooksLikeATestWithoutNamingConvention3 { public void testMethod() { } -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeTestsButAbstract.java b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeTestsButAbstract.java index 15718cff841e1..49a327480579a 100644 --- a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeTestsButAbstract.java +++ b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/LooksLikeTestsButAbstract.java @@ -18,9 +18,6 @@ */ package 
org.elasticsearch.gradle.testkit; -import org.junit.Assert; -import org.junit.Test; - public abstract class LooksLikeTestsButAbstract { public void testMethod() { diff --git a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 48a4f7adfd99e..a78b438abfd5a 100644 --- a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java +++ b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -20,4 +20,4 @@ public class NamingConventionIT { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java index 95152520a3f2d..6afb89ddf56b0 100644 --- a/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java +++ b/buildSrc/src/testKit/testingConventions/incorrect_naming_conventions/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java @@ -20,4 +20,4 @@ public class NamingConventionTests { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/Integration.java b/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/Integration.java index 80522be3fb5ab..508adc24bc73a 100644 --- a/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/Integration.java +++ b/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/Integration.java @@ -19,5 +19,5 @@ package org.elasticsearch.gradle.testkit; public class Integration { - + } diff --git a/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java b/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 48a4f7adfd99e..a78b438abfd5a 100644 --- a/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java +++ b/buildSrc/src/testKit/testingConventions/not_implementing_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -20,4 +20,4 @@ public class NamingConventionIT { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 48a4f7adfd99e..a78b438abfd5a 100644 --- a/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java +++ b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -20,4 +20,4 @@ public 
class NamingConventionIT { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/testingConventions/valid_setup_no_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java b/buildSrc/src/testKit/testingConventions/valid_setup_no_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 48a4f7adfd99e..a78b438abfd5a 100644 --- a/buildSrc/src/testKit/testingConventions/valid_setup_no_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java +++ b/buildSrc/src/testKit/testingConventions/valid_setup_no_base/src/test/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -20,4 +20,4 @@ public class NamingConventionIT { -} \ No newline at end of file +} diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/src/main/java/TestingIO.java b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/src/main/java/TestingIO.java index e4d8a0bbe0f87..eeda1f2b7b23a 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/src/main/java/TestingIO.java +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/src/main/java/TestingIO.java @@ -5,4 +5,3 @@ public TestingIO() { new File("foo"); } } - diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 4cdc59dd3bd35..96494238e75e0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -415,35 +414,6 @@ public Cancellable flushAsync(FlushRequest flushRequest, RequestOptions options, FlushResponse::fromXContent, listener, emptySet()); } - /** - * Initiate a synced flush manually using the synced flush API. - * See - * Synced flush API on elastic.co - * @param syncedFlushRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response - */ - public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, - SyncedFlushResponse::fromXContent, emptySet()); - } - - /** - * Asynchronously initiate a synced flush manually using the synced flush API. - * See - * Synced flush API on elastic.co - * @param syncedFlushRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public Cancellable flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, - ActionListener listener) { - return restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, - SyncedFlushResponse::fromXContent, listener, emptySet()); - } - /** * Retrieve the settings of one or more indices. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 553da42711c38..fea48c1c3e909 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -194,15 +193,6 @@ static Request flush(FlushRequest flushRequest) { return request; } - static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { - String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); - Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced")); - RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); - request.addParameters(parameters.asMap()); - return request; - } - static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? 
Strings.EMPTY_ARRAY : forceMergeRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge")); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 4967d8091c961..2e077f547e34f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -73,6 +73,7 @@ import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutTrainedModelRequest; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDataFrameAnalyticsRequest; @@ -792,6 +793,16 @@ static Request deleteTrainedModel(DeleteTrainedModelRequest deleteRequest) { return new Request(HttpDelete.METHOD_NAME, endpoint); } + static Request putTrainedModel(PutTrainedModelRequest putTrainedModelRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_ml", "inference") + .addPathPart(putTrainedModelRequest.getTrainedModelConfig().getModelId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putTrainedModelRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request putFilter(PutFilterRequest putFilterRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ml") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 0a71b8ddb0172..bdb2f22f3b3fa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -100,6 +100,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.PutTrainedModelRequest; +import org.elasticsearch.client.ml.PutTrainedModelResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.SetUpgradeModeRequest; @@ -2340,6 +2342,48 @@ public Cancellable getTrainedModelsAsync(GetTrainedModelsRequest request, Collections.emptySet()); } + /** + * Put trained model config + *
<p>
+ * For additional info + * see + * PUT Trained Model Config documentation + * + * @param request The {@link PutTrainedModelRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link PutTrainedModelResponse} response object + */ + public PutTrainedModelResponse putTrainedModel(PutTrainedModelRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putTrainedModel, + options, + PutTrainedModelResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Put trained model config asynchronously and notifies listener upon completion + *
<p>
+ * For additional info + * see + * PUT Trained Model Config documentation + * + * @param request The {@link PutTrainedModelRequest} + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable putTrainedModelAsync(PutTrainedModelRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::putTrainedModel, + options, + PutTrainedModelResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets trained model stats *
<p>
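A minimal usage sketch of the put trained model API added to MachineLearningClient in the hunk above (illustrative only, not part of the patch). It assumes an existing RestHighLevelClient named client and a TrainedModelConfig named config assembled elsewhere; it only calls methods and constructors that appear in the added code: PutTrainedModelRequest(config), putTrainedModel, putTrainedModelAsync, and PutTrainedModelResponse.getResponse().

    import java.io.IOException;
    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.client.Cancellable;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.ml.PutTrainedModelRequest;
    import org.elasticsearch.client.ml.PutTrainedModelResponse;
    import org.elasticsearch.client.ml.inference.TrainedModelConfig;

    // Sketch: stores a trained model config, showing both variants added above.
    static TrainedModelConfig storeModel(RestHighLevelClient client, TrainedModelConfig config) throws IOException {
        PutTrainedModelRequest request = new PutTrainedModelRequest(config);

        // Blocking variant: returns the stored config wrapped in a PutTrainedModelResponse.
        PutTrainedModelResponse response = client.machineLearning().putTrainedModel(request, RequestOptions.DEFAULT);

        // Non-blocking variant: the listener is notified on completion and the
        // returned Cancellable may be used to cancel the request.
        Cancellable cancellable = client.machineLearning().putTrainedModelAsync(
            request,
            RequestOptions.DEFAULT,
            ActionListener.wrap(
                r -> System.out.println("stored model " + r.getResponse().getModelId()),
                Exception::printStackTrace));

        return response.getResponse();
    }

The asynchronous form mirrors the other MachineLearningClient methods: it hands parsing to PutTrainedModelResponse::fromXContent and lets the caller abort via the returned Cancellable.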
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java deleted file mode 100644 index 41e9c3d062b0a..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -public class SyncedFlushResponse implements ToXContentObject { - - public static final String SHARDS_FIELD = "_shards"; - - private ShardCounts totalCounts; - private Map indexResults; - - SyncedFlushResponse(ShardCounts totalCounts, Map indexResults) { - this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed); - this.indexResults = Collections.unmodifiableMap(indexResults); - } - - /** - * @return The total number of shard copies that were processed across all indexes - */ - public int totalShards() { - return totalCounts.total; - } - - /** - * @return The number of successful shard copies that were processed across all indexes - */ - public int successfulShards() { - return totalCounts.successful; - } - - /** - * @return The number of failed shard copies that were processed across all indexes - */ - public int failedShards() { - return totalCounts.failed; - } - - /** - * @return A map of results for each index where the keys of the map are the index names - * and the values are the results encapsulated in {@link IndexResult}. 
- */ - public Map getIndexResults() { - return indexResults; - } - - ShardCounts getShardCounts() { - return totalCounts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startObject(SHARDS_FIELD); - totalCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry entry: indexResults.entrySet()) { - String indexName = entry.getKey(); - IndexResult indexResult = entry.getValue(); - builder.startObject(indexName); - indexResult.toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - return builder; - } - - public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - ShardCounts totalCounts = null; - Map indexResults = new HashMap<>(); - XContentLocation startLoc = parser.getTokenLocation(); - while (parser.nextToken().equals(Token.FIELD_NAME)) { - if (parser.currentName().equals(SHARDS_FIELD)) { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - totalCounts = ShardCounts.fromXContent(parser); - } else { - String indexName = parser.currentName(); - IndexResult indexResult = IndexResult.fromXContent(parser); - indexResults.put(indexName, indexResult); - } - } - if (totalCounts != null) { - return new SyncedFlushResponse(totalCounts, indexResults); - } else { - throw new ParsingException( - startLoc, - "Unable to reconstruct object. Total counts for shards couldn't be parsed." - ); - } - } - - /** - * Encapsulates the number of total successful and failed shard copies - */ - public static final class ShardCounts implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>( - "shardcounts", - a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - } - - private int total; - private int successful; - private int failed; - - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(TOTAL_FIELD, total); - builder.field(SUCCESSFUL_FIELD, successful); - builder.field(FAILED_FIELD, failed); - return builder; - } - - public static ShardCounts fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - public boolean equals(ShardCounts other) { - if (other != null) { - return - other.total == this.total && - other.successful == this.successful && - other.failed == this.failed; - } else { - return false; - } - } - - } - - /** - * Description for the flush/synced results for a particular index. - * This includes total, successful and failed copies along with failure description for each failed copy. 
- */ - public static final class IndexResult implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - public static final String FAILURES_FIELD = "failures"; - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>( - "indexresult", - a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List)a[3]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD)); - } - - private ShardCounts counts; - private List failures; - - IndexResult(int total, int successful, int failed, List failures) { - counts = new ShardCounts(total, successful, failed); - if (failures != null) { - this.failures = Collections.unmodifiableList(failures); - } else { - this.failures = Collections.unmodifiableList(new ArrayList<>()); - } - } - - /** - * @return The total number of shard copies that were processed for this index. - */ - public int totalShards() { - return counts.total; - } - - /** - * @return The number of successful shard copies that were processed for this index. - */ - public int successfulShards() { - return counts.successful; - } - - /** - * @return The number of failed shard copies that were processed for this index. - */ - public int failedShards() { - return counts.failed; - } - - /** - * @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index. - */ - public List failures() { - return failures; - } - - ShardCounts getShardCounts() { - return counts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - counts.toXContent(builder, params); - if (failures.size() > 0) { - builder.startArray(FAILURES_FIELD); - for (ShardFailure failure : failures) { - failure.toXContent(builder, params); - } - builder.endArray(); - } - return builder; - } - - public static IndexResult fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } - - /** - * Description of a failed shard copy for an index. 
- */ - public static final class ShardFailure implements ToXContentFragment { - - public static String SHARD_ID_FIELD = "shard"; - public static String FAILURE_REASON_FIELD = "reason"; - public static String ROUTING_FIELD = "routing"; - - private int shardId; - private String failureReason; - private Map routing; - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "shardfailure", - a -> new ShardFailure((Integer)a[0], (String)a[1], (Map)a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD)); - PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD)); - PARSER.declareObject( - optionalConstructorArg(), - (parser, c) -> parser.map(), - new ParseField(ROUTING_FIELD) - ); - } - - ShardFailure(int shardId, String failureReason, Map routing) { - this.shardId = shardId; - this.failureReason = failureReason; - if (routing != null) { - this.routing = Collections.unmodifiableMap(routing); - } else { - this.routing = Collections.unmodifiableMap(new HashMap<>()); - } - } - - /** - * @return Id of the shard whose copy failed - */ - public int getShardId() { - return shardId; - } - - /** - * @return Reason for failure of the shard copy - */ - public String getFailureReason() { - return failureReason; - } - - /** - * @return Additional information about the failure. - */ - public Map getRouting() { - return routing; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(SHARD_ID_FIELD, shardId); - builder.field(FAILURE_REASON_FIELD, failureReason); - if (routing.size() > 0) { - builder.field(ROUTING_FIELD, routing); - } - builder.endObject(); - return builder; - } - - public static ShardFailure fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelRequest.java new file mode 100644 index 0000000000000..780ec31771baa --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelRequest.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + + +public class PutTrainedModelRequest implements Validatable, ToXContentObject { + + private final TrainedModelConfig config; + + public PutTrainedModelRequest(TrainedModelConfig config) { + this.config = config; + } + + public TrainedModelConfig getTrainedModelConfig() { + return config; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + return config.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutTrainedModelRequest request = (PutTrainedModelRequest) o; + return Objects.equals(config, request.config); + } + + @Override + public int hashCode() { + return Objects.hash(config); + } + + @Override + public final String toString() { + return Strings.toString(config); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelResponse.java new file mode 100644 index 0000000000000..3bc81f1812940 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutTrainedModelResponse.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + + +public class PutTrainedModelResponse implements ToXContentObject { + + private final TrainedModelConfig trainedModelConfig; + + public static PutTrainedModelResponse fromXContent(XContentParser parser) throws IOException { + return new PutTrainedModelResponse(TrainedModelConfig.PARSER.parse(parser, null).build()); + } + + public PutTrainedModelResponse(TrainedModelConfig trainedModelConfig) { + this.trainedModelConfig = trainedModelConfig; + } + + public TrainedModelConfig getResponse() { + return trainedModelConfig; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return trainedModelConfig.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutTrainedModelResponse response = (PutTrainedModelResponse) o; + return Objects.equals(trainedModelConfig, response.trainedModelConfig); + } + + @Override + public int hashCode() { + return Objects.hash(trainedModelConfig); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Classification.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Classification.java index 9d384e6d86786..02861adc73845 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Classification.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Classification.java @@ -46,6 +46,7 @@ public static Builder builder(String dependentVariable) { static final ParseField ETA = new ParseField("eta"); static final ParseField MAXIMUM_NUMBER_TREES = new ParseField("maximum_number_trees"); static final ParseField FEATURE_BAG_FRACTION = new ParseField("feature_bag_fraction"); + static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); static final ParseField PREDICTION_FIELD_NAME = new ParseField("prediction_field_name"); static final ParseField TRAINING_PERCENT = new ParseField("training_percent"); static final ParseField NUM_TOP_CLASSES = new ParseField("num_top_classes"); @@ -62,10 +63,11 @@ public static Builder builder(String dependentVariable) { (Double) a[3], (Integer) a[4], (Double) a[5], - (String) a[6], - (Double) a[7], - (Integer) a[8], - (Long) a[9])); + (Integer) a[6], + (String) a[7], + (Double) a[8], + (Integer) a[9], + (Long) a[10])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DEPENDENT_VARIABLE); @@ -74,6 +76,7 @@ public static Builder builder(String dependentVariable) { PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), ETA); PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_NUMBER_TREES); PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_BAG_FRACTION); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUM_TOP_FEATURE_IMPORTANCE_VALUES); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PREDICTION_FIELD_NAME); PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), TRAINING_PERCENT); 
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUM_TOP_CLASSES); @@ -86,13 +89,15 @@ public static Builder builder(String dependentVariable) { private final Double eta; private final Integer maximumNumberTrees; private final Double featureBagFraction; + private final Integer numTopFeatureImportanceValues; private final String predictionFieldName; private final Double trainingPercent; private final Integer numTopClasses; private final Long randomizeSeed; private Classification(String dependentVariable, @Nullable Double lambda, @Nullable Double gamma, @Nullable Double eta, - @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, @Nullable String predictionFieldName, + @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, + @Nullable Integer numTopFeatureImportanceValues, @Nullable String predictionFieldName, @Nullable Double trainingPercent, @Nullable Integer numTopClasses, @Nullable Long randomizeSeed) { this.dependentVariable = Objects.requireNonNull(dependentVariable); this.lambda = lambda; @@ -100,6 +105,7 @@ private Classification(String dependentVariable, @Nullable Double lambda, @Nulla this.eta = eta; this.maximumNumberTrees = maximumNumberTrees; this.featureBagFraction = featureBagFraction; + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; this.predictionFieldName = predictionFieldName; this.trainingPercent = trainingPercent; this.numTopClasses = numTopClasses; @@ -135,6 +141,10 @@ public Double getFeatureBagFraction() { return featureBagFraction; } + public Integer getNumTopFeatureImportanceValues() { + return numTopFeatureImportanceValues; + } + public String getPredictionFieldName() { return predictionFieldName; } @@ -170,6 +180,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (featureBagFraction != null) { builder.field(FEATURE_BAG_FRACTION.getPreferredName(), featureBagFraction); } + if (numTopFeatureImportanceValues != null) { + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } if (predictionFieldName != null) { builder.field(PREDICTION_FIELD_NAME.getPreferredName(), predictionFieldName); } @@ -188,8 +201,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public int hashCode() { - return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, - trainingPercent, randomizeSeed, numTopClasses); + return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, numTopFeatureImportanceValues, + predictionFieldName, trainingPercent, randomizeSeed, numTopClasses); } @Override @@ -203,6 +216,7 @@ public boolean equals(Object o) { && Objects.equals(eta, that.eta) && Objects.equals(maximumNumberTrees, that.maximumNumberTrees) && Objects.equals(featureBagFraction, that.featureBagFraction) + && Objects.equals(numTopFeatureImportanceValues, that.numTopFeatureImportanceValues) && Objects.equals(predictionFieldName, that.predictionFieldName) && Objects.equals(trainingPercent, that.trainingPercent) && Objects.equals(randomizeSeed, that.randomizeSeed) @@ -221,6 +235,7 @@ public static class Builder { private Double eta; private Integer maximumNumberTrees; private Double featureBagFraction; + private Integer numTopFeatureImportanceValues; private String predictionFieldName; private Double trainingPercent; private Integer numTopClasses; @@ -255,6 +270,11 @@ public Builder 
setFeatureBagFraction(Double featureBagFraction) { return this; } + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + public Builder setPredictionFieldName(String predictionFieldName) { this.predictionFieldName = predictionFieldName; return this; @@ -276,8 +296,8 @@ public Builder setNumTopClasses(Integer numTopClasses) { } public Classification build() { - return new Classification(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, - trainingPercent, numTopClasses, randomizeSeed); + return new Classification(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, + numTopFeatureImportanceValues, predictionFieldName, trainingPercent, numTopClasses, randomizeSeed); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java index fa55ee40b27fb..d7e374a2563a1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/Regression.java @@ -46,6 +46,7 @@ public static Builder builder(String dependentVariable) { static final ParseField ETA = new ParseField("eta"); static final ParseField MAXIMUM_NUMBER_TREES = new ParseField("maximum_number_trees"); static final ParseField FEATURE_BAG_FRACTION = new ParseField("feature_bag_fraction"); + static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); static final ParseField PREDICTION_FIELD_NAME = new ParseField("prediction_field_name"); static final ParseField TRAINING_PERCENT = new ParseField("training_percent"); static final ParseField RANDOMIZE_SEED = new ParseField("randomize_seed"); @@ -61,9 +62,10 @@ public static Builder builder(String dependentVariable) { (Double) a[3], (Integer) a[4], (Double) a[5], - (String) a[6], - (Double) a[7], - (Long) a[8])); + (Integer) a[6], + (String) a[7], + (Double) a[8], + (Long) a[9])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DEPENDENT_VARIABLE); @@ -72,6 +74,7 @@ public static Builder builder(String dependentVariable) { PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), ETA); PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_NUMBER_TREES); PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_BAG_FRACTION); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUM_TOP_FEATURE_IMPORTANCE_VALUES); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PREDICTION_FIELD_NAME); PARSER.declareDouble(ConstructingObjectParser.optionalConstructorArg(), TRAINING_PERCENT); PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), RANDOMIZE_SEED); @@ -83,12 +86,14 @@ public static Builder builder(String dependentVariable) { private final Double eta; private final Integer maximumNumberTrees; private final Double featureBagFraction; + private final Integer numTopFeatureImportanceValues; private final String predictionFieldName; private final Double trainingPercent; private final Long randomizeSeed; - private Regression(String dependentVariable, @Nullable Double lambda, @Nullable Double gamma, @Nullable Double eta, - @Nullable Integer maximumNumberTrees, 
@Nullable Double featureBagFraction, @Nullable String predictionFieldName, + private Regression(String dependentVariable, @Nullable Double lambda, @Nullable Double gamma, @Nullable Double eta, + @Nullable Integer maximumNumberTrees, @Nullable Double featureBagFraction, + @Nullable Integer numTopFeatureImportanceValues, @Nullable String predictionFieldName, @Nullable Double trainingPercent, @Nullable Long randomizeSeed) { this.dependentVariable = Objects.requireNonNull(dependentVariable); this.lambda = lambda; @@ -96,6 +101,7 @@ private Regression(String dependentVariable, @Nullable Double lambda, @Nullable this.eta = eta; this.maximumNumberTrees = maximumNumberTrees; this.featureBagFraction = featureBagFraction; + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; this.predictionFieldName = predictionFieldName; this.trainingPercent = trainingPercent; this.randomizeSeed = randomizeSeed; @@ -130,6 +136,10 @@ public Double getFeatureBagFraction() { return featureBagFraction; } + public Integer getNumTopFeatureImportanceValues() { + return numTopFeatureImportanceValues; + } + public String getPredictionFieldName() { return predictionFieldName; } @@ -161,6 +171,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (featureBagFraction != null) { builder.field(FEATURE_BAG_FRACTION.getPreferredName(), featureBagFraction); } + if (numTopFeatureImportanceValues != null) { + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } if (predictionFieldName != null) { builder.field(PREDICTION_FIELD_NAME.getPreferredName(), predictionFieldName); } @@ -176,8 +189,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public int hashCode() { - return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, - trainingPercent, randomizeSeed); + return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, numTopFeatureImportanceValues, + predictionFieldName, trainingPercent, randomizeSeed); } @Override @@ -191,6 +204,7 @@ public boolean equals(Object o) { && Objects.equals(eta, that.eta) && Objects.equals(maximumNumberTrees, that.maximumNumberTrees) && Objects.equals(featureBagFraction, that.featureBagFraction) + && Objects.equals(numTopFeatureImportanceValues, that.numTopFeatureImportanceValues) && Objects.equals(predictionFieldName, that.predictionFieldName) && Objects.equals(trainingPercent, that.trainingPercent) && Objects.equals(randomizeSeed, that.randomizeSeed); @@ -208,6 +222,7 @@ public static class Builder { private Double eta; private Integer maximumNumberTrees; private Double featureBagFraction; + private Integer numTopFeatureImportanceValues; private String predictionFieldName; private Double trainingPercent; private Long randomizeSeed; @@ -241,6 +256,11 @@ public Builder setFeatureBagFraction(Double featureBagFraction) { return this; } + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + public Builder setPredictionFieldName(String predictionFieldName) { this.predictionFieldName = predictionFieldName; return this; @@ -257,8 +277,8 @@ public Builder setRandomizeSeed(Long randomizeSeed) { } public Regression build() { - return new Regression(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, - 
trainingPercent, randomizeSeed); + return new Regression(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, + numTopFeatureImportanceValues, predictionFieldName, trainingPercent, randomizeSeed); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressor.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressor.java new file mode 100644 index 0000000000000..9bec4c4eb5d52 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressor.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml.inference; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +/** + * Collection of helper methods. Similar to CompressedXContent, but this utilizes GZIP. 
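Both the Classification and Regression builders above expose the new num_top_feature_importance_values hyperparameter in the same way; a minimal hedged sketch for classification (the dependent variable name is illustrative):

    Classification classification = Classification.builder("my_dependent_variable")
        .setNumTopFeatureImportanceValues(3)   // return importance for at most the top 3 features
        .setTrainingPercent(80.0)
        .build();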
+ */ +public final class InferenceToXContentCompressor { + private static final int BUFFER_SIZE = 4096; + private static final long MAX_INFLATED_BYTES = 1_000_000_000; // 1 gb maximum + + private InferenceToXContentCompressor() {} + + public static String deflate(T objectToCompress) throws IOException { + BytesReference reference = XContentHelper.toXContent(objectToCompress, XContentType.JSON, false); + return deflate(reference); + } + + public static T inflate(String compressedString, + CheckedFunction parserFunction, + NamedXContentRegistry xContentRegistry) throws IOException { + try(XContentParser parser = XContentHelper.createParser(xContentRegistry, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inflate(compressedString, MAX_INFLATED_BYTES), + XContentType.JSON)) { + return parserFunction.apply(parser); + } + } + + static BytesReference inflate(String compressedString, long streamSize) throws IOException { + byte[] compressedBytes = Base64.getDecoder().decode(compressedString.getBytes(StandardCharsets.UTF_8)); + InputStream gzipStream = new GZIPInputStream(new BytesArray(compressedBytes).streamInput(), BUFFER_SIZE); + InputStream inflateStream = new SimpleBoundedInputStream(gzipStream, streamSize); + return Streams.readFully(inflateStream); + } + + private static String deflate(BytesReference reference) throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + try (OutputStream compressedOutput = new GZIPOutputStream(out, BUFFER_SIZE)) { + reference.writeTo(compressedOutput); + } + return new String(Base64.getEncoder().encode(BytesReference.toBytes(out.bytes())), StandardCharsets.UTF_8); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/SimpleBoundedInputStream.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/SimpleBoundedInputStream.java new file mode 100644 index 0000000000000..683e23dc9d7cf --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/SimpleBoundedInputStream.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.inference; + + +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; + +/** + * This is a pared down bounded input stream. + * Only read is specifically enforced. 
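A hedged round-trip sketch of the compressor helper just added; the `definition` variable and the deflated config string are illustrative, and the registry wiring matches the test changes later in this diff:

    // Compress a definition for TrainedModelConfig.Builder#setCompressedDefinition.
    String compressed = InferenceToXContentCompressor.deflate(definition);

    // Inflating needs a parser function plus a registry that knows the ML inference parsers.
    NamedXContentRegistry registry =
        new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers());
    TrainedModelConfig config = InferenceToXContentCompressor.inflate(
        someDeflatedConfigString,            // a previously deflated TrainedModelConfig (not shown)
        TrainedModelConfig::fromXContent,
        registry);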
+ */ +final class SimpleBoundedInputStream extends InputStream { + + private final InputStream in; + private final long maxBytes; + private long numBytes; + + SimpleBoundedInputStream(InputStream inputStream, long maxBytes) { + this.in = Objects.requireNonNull(inputStream, "inputStream"); + if (maxBytes < 0) { + throw new IllegalArgumentException("[maxBytes] must be greater than or equal to 0"); + } + this.maxBytes = maxBytes; + } + + + /** + * A simple wrapper around the injected input stream that restricts the total number of bytes able to be read. + * @return The byte read. -1 on internal stream completion or when maxBytes is exceeded. + * @throws IOException on failure + */ + @Override + public int read() throws IOException { + // We have reached the maximum, signal stream completion. + if (numBytes >= maxBytes) { + return -1; + } + numBytes++; + return in.read(); + } + + /** + * Delegates `close` to the wrapped InputStream + * @throws IOException on failure + */ + @Override + public void close() throws IOException { + in.close(); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelConfig.java index 23eb01fb3b153..9d2b323cf4880 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelConfig.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.time.Instant; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -111,7 +112,7 @@ public static TrainedModelConfig fromXContent(XContentParser parser) throws IOEx this.modelId = modelId; this.createdBy = createdBy; this.version = version; - this.createTime = Instant.ofEpochMilli(createTime.toEpochMilli()); + this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli()); this.definition = definition; this.compressedDefinition = compressedDefinition; this.description = description; @@ -293,12 +294,12 @@ public Builder setModelId(String modelId) { return this; } - public Builder setCreatedBy(String createdBy) { + private Builder setCreatedBy(String createdBy) { this.createdBy = createdBy; return this; } - public Builder setVersion(Version version) { + private Builder setVersion(Version version) { this.version = version; return this; } @@ -312,7 +313,7 @@ public Builder setDescription(String description) { return this; } - public Builder setCreateTime(Instant createTime) { + private Builder setCreateTime(Instant createTime) { this.createTime = createTime; return this; } @@ -322,6 +323,10 @@ public Builder setTags(List tags) { return this; } + public Builder setTags(String... 
tags) { + return setTags(Arrays.asList(tags)); + } + public Builder setMetadata(Map metadata) { this.metadata = metadata; return this; @@ -347,17 +352,17 @@ public Builder setInput(TrainedModelInput input) { return this; } - public Builder setEstimatedHeapMemory(Long estimatedHeapMemory) { + private Builder setEstimatedHeapMemory(Long estimatedHeapMemory) { this.estimatedHeapMemory = estimatedHeapMemory; return this; } - public Builder setEstimatedOperations(Long estimatedOperations) { + private Builder setEstimatedOperations(Long estimatedOperations) { this.estimatedOperations = estimatedOperations; return this; } - public Builder setLicenseLevel(String licenseLevel) { + private Builder setLicenseLevel(String licenseLevel) { this.licenseLevel = licenseLevel; return this; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelInput.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelInput.java index 10f849cac481a..9b19323023d81 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelInput.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelInput.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -48,6 +49,10 @@ public TrainedModelInput(List fieldNames) { this.fieldNames = fieldNames; } + public TrainedModelInput(String... fieldNames) { + this(Arrays.asList(fieldNames)); + } + public static TrainedModelInput fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/Ensemble.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/Ensemble.java index d16d758769c2b..60ffde6e35f59 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/Ensemble.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/Ensemble.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -41,6 +42,7 @@ public class Ensemble implements TrainedModel { public static final ParseField AGGREGATE_OUTPUT = new ParseField("aggregate_output"); public static final ParseField TARGET_TYPE = new ParseField("target_type"); public static final ParseField CLASSIFICATION_LABELS = new ParseField("classification_labels"); + public static final ParseField CLASSIFICATION_WEIGHTS = new ParseField("classification_weights"); private static final ObjectParser PARSER = new ObjectParser<>( NAME, @@ -60,6 +62,7 @@ public class Ensemble implements TrainedModel { AGGREGATE_OUTPUT); PARSER.declareString(Ensemble.Builder::setTargetType, TARGET_TYPE); PARSER.declareStringArray(Ensemble.Builder::setClassificationLabels, CLASSIFICATION_LABELS); + PARSER.declareDoubleArray(Ensemble.Builder::setClassificationWeights, CLASSIFICATION_WEIGHTS); } public static Ensemble fromXContent(XContentParser parser) { @@ -71,17 +74,20 @@ public static Ensemble fromXContent(XContentParser parser) { private final OutputAggregator outputAggregator; private final TargetType targetType; private final List 
classificationLabels; + private final double[] classificationWeights; Ensemble(List featureNames, List models, @Nullable OutputAggregator outputAggregator, TargetType targetType, - @Nullable List classificationLabels) { + @Nullable List classificationLabels, + @Nullable double[] classificationWeights) { this.featureNames = featureNames; this.models = models; this.outputAggregator = outputAggregator; this.targetType = targetType; this.classificationLabels = classificationLabels; + this.classificationWeights = classificationWeights; } @Override @@ -116,6 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par if (classificationLabels != null) { builder.field(CLASSIFICATION_LABELS.getPreferredName(), classificationLabels); } + if (classificationWeights != null) { + builder.field(CLASSIFICATION_WEIGHTS.getPreferredName(), classificationWeights); + } builder.endObject(); return builder; } @@ -129,12 +138,18 @@ public boolean equals(Object o) { && Objects.equals(models, that.models) && Objects.equals(targetType, that.targetType) && Objects.equals(classificationLabels, that.classificationLabels) + && Arrays.equals(classificationWeights, that.classificationWeights) && Objects.equals(outputAggregator, that.outputAggregator); } @Override public int hashCode() { - return Objects.hash(featureNames, models, outputAggregator, classificationLabels, targetType); + return Objects.hash(featureNames, + models, + outputAggregator, + classificationLabels, + targetType, + Arrays.hashCode(classificationWeights)); } public static Builder builder() { @@ -147,6 +162,7 @@ public static class Builder { private OutputAggregator outputAggregator; private TargetType targetType; private List classificationLabels; + private double[] classificationWeights; public Builder setFeatureNames(List featureNames) { this.featureNames = featureNames; @@ -173,6 +189,11 @@ public Builder setClassificationLabels(List classificationLabels) { return this; } + public Builder setClassificationWeights(List classificationWeights) { + this.classificationWeights = classificationWeights.stream().mapToDouble(Double::doubleValue).toArray(); + return this; + } + private void setOutputAggregatorFromParser(List outputAggregators) { this.setOutputAggregator(outputAggregators.get(0)); } @@ -182,7 +203,7 @@ private void setTargetType(String targetType) { } public Ensemble build() { - return new Ensemble(featureNames, trainedModels, outputAggregator, targetType, classificationLabels); + return new Ensemble(featureNames, trainedModels, outputAggregator, targetType, classificationLabels, classificationWeights); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index e712604266a31..3303ed4f218e4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; 
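Returning to the Ensemble change above: the new classification_weights are supplied per classification label at build time. A partial hedged sketch, with illustrative feature names and labels and the member models and aggregator omitted:

    Ensemble ensemble = Ensemble.builder()
        .setFeatureNames(Arrays.asList("feature_1", "feature_2"))
        .setClassificationLabels(Arrays.asList("cat", "dog"))
        .setClassificationWeights(Arrays.asList(0.7, 0.3))   // stored internally as a double[]
        .build();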
@@ -758,39 +757,6 @@ public void testFlush() throws IOException { } } - public void testSyncedFlush() throws IOException { - { - String index = "index"; - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(index, settings); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index); - SyncedFlushResponse flushResponse = - execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync); - assertThat(flushResponse.totalShards(), equalTo(1)); - assertThat(flushResponse.successfulShards(), equalTo(1)); - assertThat(flushResponse.failedShards(), equalTo(0)); - } - { - String nonExistentIndex = "non_existent_index"; - assertFalse(indexExists(nonExistentIndex)); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> - execute( - syncedFlushRequest, - highLevelClient().indices()::flushSynced, - highLevelClient().indices()::flushSyncedAsync - ) - ); - assertEquals(RestStatus.NOT_FOUND, exception.status()); - } - } - - public void testClearCache() throws IOException { { String index = "index"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index ae94e700b5fb7..c9c2ee065fded 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -460,30 +459,6 @@ public void testFlush() { Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } - public void testSyncedFlush() { - String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); - SyncedFlushRequest syncedFlushRequest; - if (ESTestCase.randomBoolean()) { - syncedFlushRequest = new SyncedFlushRequest(indices); - } else { - syncedFlushRequest = new SyncedFlushRequest(); - syncedFlushRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, - expectedParams); - Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_flush/synced"); - Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - Assert.assertThat(request.getParameters(), equalTo(expectedParams)); - Assert.assertThat(request.getEntity(), nullValue()); - Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - public void testForceMerge() { String[] indices = ESTestCase.randomBoolean() ? 
null : RequestConvertersTests.randomIndicesNames(0, 5); ForceMergeRequest forceMergeRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 475dda254448f..c137fbc464d56 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -71,6 +71,7 @@ import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.PutTrainedModelRequest; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDataFrameAnalyticsRequest; @@ -91,6 +92,9 @@ import org.elasticsearch.client.ml.dataframe.MlDataFrameAnalysisNamedXContentProvider; import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import org.elasticsearch.client.ml.filestructurefinder.FileStructure; +import org.elasticsearch.client.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.client.ml.inference.TrainedModelConfigTests; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; @@ -874,6 +878,20 @@ public void testDeleteTrainedModel() { assertNull(request.getEntity()); } + public void testPutTrainedModel() throws IOException { + TrainedModelConfig trainedModelConfig = TrainedModelConfigTests.createTestTrainedModelConfig(); + PutTrainedModelRequest putTrainedModelRequest = new PutTrainedModelRequest(trainedModelConfig); + + Request request = MLRequestConverters.putTrainedModel(putTrainedModelRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_ml/inference/" + trainedModelConfig.getModelId())); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + TrainedModelConfig parsedTrainedModelConfig = TrainedModelConfig.PARSER.apply(parser, null).build(); + assertThat(parsedTrainedModelConfig, equalTo(trainedModelConfig)); + } + } + public void testPutFilter() throws IOException { MlFilter filter = MlFilterTests.createRandomBuilder("foo").build(); PutFilterRequest putFilterRequest = new PutFilterRequest(filter); @@ -1046,6 +1064,7 @@ protected NamedXContentRegistry xContentRegistry() { namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); namedXContent.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedXContentParsers()); namedXContent.addAll(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); return new NamedXContentRegistry(namedXContent); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 547521a089cc6..6fe08f8a507de 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -101,6 +101,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.PutTrainedModelRequest; +import org.elasticsearch.client.ml.PutTrainedModelResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.SetUpgradeModeRequest; @@ -146,9 +148,12 @@ import org.elasticsearch.client.ml.dataframe.explain.FieldSelection; import org.elasticsearch.client.ml.dataframe.explain.MemoryEstimation; import org.elasticsearch.client.ml.filestructurefinder.FileStructure; +import org.elasticsearch.client.ml.inference.InferenceToXContentCompressor; +import org.elasticsearch.client.ml.inference.MlInferenceNamedXContentProvider; import org.elasticsearch.client.ml.inference.TrainedModelConfig; import org.elasticsearch.client.ml.inference.TrainedModelDefinition; import org.elasticsearch.client.ml.inference.TrainedModelDefinitionTests; +import org.elasticsearch.client.ml.inference.TrainedModelInput; import org.elasticsearch.client.ml.inference.TrainedModelStats; import org.elasticsearch.client.ml.inference.trainedmodel.TargetType; import org.elasticsearch.client.ml.inference.trainedmodel.langident.LangIdentNeuralNetwork; @@ -162,14 +167,12 @@ import org.elasticsearch.client.ml.job.process.ModelSnapshot; import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -178,11 +181,9 @@ import org.junit.After; import java.io.IOException; -import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -190,7 +191,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.zip.GZIPOutputStream; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; @@ -1294,6 +1294,12 @@ public void testPutDataFrameAnalyticsConfig_GivenRegression() throws Exception { .setPredictionFieldName("my_dependent_variable_prediction") .setTrainingPercent(80.0) .setRandomizeSeed(42L) + .setLambda(1.0) + .setGamma(1.0) + .setEta(1.0) + .setMaximumNumberTrees(10) + .setFeatureBagFraction(0.5) + .setNumTopFeatureImportanceValues(3) .build()) .setDescription("this is a regression") .build(); @@ -1331,6 +1337,12 @@ public void testPutDataFrameAnalyticsConfig_GivenClassification() throws Excepti .setTrainingPercent(80.0) .setRandomizeSeed(42L) .setNumTopClasses(1) + .setLambda(1.0) + .setGamma(1.0) + .setEta(1.0) + .setMaximumNumberTrees(10) + 
.setFeatureBagFraction(0.5) + .setNumTopFeatureImportanceValues(3) .build()) .setDescription("this is a classification") .build(); @@ -2192,6 +2204,50 @@ public void testGetTrainedModels() throws Exception { } } + public void testPutTrainedModel() throws Exception { + String modelId = "test-put-trained-model"; + String modelIdCompressed = "test-put-trained-model-compressed-definition"; + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder(TargetType.REGRESSION).build(); + TrainedModelConfig trainedModelConfig = TrainedModelConfig.builder() + .setDefinition(definition) + .setModelId(modelId) + .setInput(new TrainedModelInput(Arrays.asList("col1", "col2", "col3", "col4"))) + .setDescription("test model") + .build(); + PutTrainedModelResponse putTrainedModelResponse = execute(new PutTrainedModelRequest(trainedModelConfig), + machineLearningClient::putTrainedModel, + machineLearningClient::putTrainedModelAsync); + TrainedModelConfig createdModel = putTrainedModelResponse.getResponse(); + assertThat(createdModel.getModelId(), equalTo(modelId)); + + definition = TrainedModelDefinitionTests.createRandomBuilder(TargetType.REGRESSION).build(); + trainedModelConfig = TrainedModelConfig.builder() + .setCompressedDefinition(InferenceToXContentCompressor.deflate(definition)) + .setModelId(modelIdCompressed) + .setInput(new TrainedModelInput(Arrays.asList("col1", "col2", "col3", "col4"))) + .setDescription("test model") + .build(); + putTrainedModelResponse = execute(new PutTrainedModelRequest(trainedModelConfig), + machineLearningClient::putTrainedModel, + machineLearningClient::putTrainedModelAsync); + createdModel = putTrainedModelResponse.getResponse(); + assertThat(createdModel.getModelId(), equalTo(modelIdCompressed)); + + GetTrainedModelsResponse getTrainedModelsResponse = execute( + new GetTrainedModelsRequest(modelIdCompressed).setDecompressDefinition(true).setIncludeDefinition(true), + machineLearningClient::getTrainedModels, + machineLearningClient::getTrainedModelsAsync); + + assertThat(getTrainedModelsResponse.getCount(), equalTo(1L)); + assertThat(getTrainedModelsResponse.getTrainedModels(), hasSize(1)); + assertThat(getTrainedModelsResponse.getTrainedModels().get(0).getCompressedDefinition(), is(nullValue())); + assertThat(getTrainedModelsResponse.getTrainedModels().get(0).getDefinition(), is(not(nullValue()))); + assertThat(getTrainedModelsResponse.getTrainedModels().get(0).getModelId(), equalTo(modelIdCompressed)); + } + public void testGetTrainedModelsStats() throws Exception { MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); String modelIdPrefix = "a-get-trained-model-stats-"; @@ -2474,56 +2530,13 @@ private void openJob(Job job) throws IOException { private void putTrainedModel(String modelId) throws IOException { TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder(TargetType.REGRESSION).build(); - highLevelClient().index( - new IndexRequest(".ml-inference-000001") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(modelConfigString(modelId), XContentType.JSON) - .id(modelId), - RequestOptions.DEFAULT); - - highLevelClient().index( - new IndexRequest(".ml-inference-000001") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(modelDocString(compressDefinition(definition), modelId), XContentType.JSON) - .id("trained_model_definition_doc-" + modelId + "-0"), - 
RequestOptions.DEFAULT); - } - - private String compressDefinition(TrainedModelDefinition definition) throws IOException { - BytesReference reference = XContentHelper.toXContent(definition, XContentType.JSON, false); - BytesStreamOutput out = new BytesStreamOutput(); - try (OutputStream compressedOutput = new GZIPOutputStream(out, 4096)) { - reference.writeTo(compressedOutput); - } - return new String(Base64.getEncoder().encode(BytesReference.toBytes(out.bytes())), StandardCharsets.UTF_8); - } - - private static String modelConfigString(String modelId) { - return "{\n" + - " \"doc_type\": \"trained_model_config\",\n" + - " \"model_id\": \"" + modelId + "\",\n" + - " \"input\":{\"field_names\":[\"col1\",\"col2\",\"col3\",\"col4\"]}," + - " \"description\": \"test model\",\n" + - " \"version\": \"7.6.0\",\n" + - " \"license_level\": \"platinum\",\n" + - " \"created_by\": \"ml_test\",\n" + - " \"estimated_heap_memory_usage_bytes\": 0," + - " \"estimated_operations\": 0," + - " \"created_time\": 0\n" + - "}"; - } - - private static String modelDocString(String compressedDefinition, String modelId) { - return "" + - "{" + - "\"model_id\": \"" + modelId + "\",\n" + - "\"doc_num\": 0,\n" + - "\"doc_type\": \"trained_model_definition_doc\",\n" + - " \"compression_version\": " + 1 + ",\n" + - " \"total_definition_length\": " + compressedDefinition.length() + ",\n" + - " \"definition_length\": " + compressedDefinition.length() + ",\n" + - "\"definition\": \"" + compressedDefinition + "\"\n" + - "}"; + TrainedModelConfig trainedModelConfig = TrainedModelConfig.builder() + .setDefinition(definition) + .setModelId(modelId) + .setInput(new TrainedModelInput(Arrays.asList("col1", "col2", "col3", "col4"))) + .setDescription("test model") + .build(); + highLevelClient().machineLearning().putTrainedModel(new PutTrainedModelRequest(trainedModelConfig), RequestOptions.DEFAULT); } private void waitForJobToClose(String jobId) throws Exception { @@ -2768,4 +2781,9 @@ public void testEnableUpgradeMode() throws Exception { mlInfoResponse = machineLearningClient.getMlInfo(new MlInfoRequest(), RequestOptions.DEFAULT); assertThat(mlInfoResponse.getInfo().get("upgrade_mode"), equalTo(false)); } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 4ec048ad1e4f5..c33c7212388ad 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -839,7 +839,8 @@ public void testApiNamingConventions() throws Exception { // looking like it doesn't have a valid implementatation when it does. 
apiUnsupported.remove("indices.get_template"); - + // Synced flush is deprecated + apiUnsupported.remove("indices.flush_synced"); for (Map.Entry> entry : methods.entrySet()) { String apiName = entry.getKey(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java deleted file mode 100644 index f5cb9cdb0e02b..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class SyncedFlushResponseTests extends ESTestCase { - - public void testXContentSerialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - - XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - serverResponsebuilder.startObject(); - plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS); - serverResponsebuilder.endObject(); - XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS); - Map serverContentMap = convertFailureListToSet( - serverResponsebuilder - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(serverResponsebuilder).streamInput() - ).map() - ); - Map clientContentMap = convertFailureListToSet( - clientResponsebuilder - .generator() - .contentType() - 
.xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(clientResponsebuilder).streamInput() - ) - .map() - ); - assertEquals(serverContentMap, clientContentMap); - } - - public void testXContentDeserialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - builder.startObject(); - plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - XContentParser parser = builder - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ); - SyncedFlushResponse originalResponse = plan.clientResult; - SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser); - assertNotNull(parsedResponse); - assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts()); - for (Map.Entry entry: originalResponse.getIndexResults().entrySet()) { - String index = entry.getKey(); - SyncedFlushResponse.IndexResult responseResult = entry.getValue(); - SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index); - assertNotNull(responseResult); - assertNotNull(parsedResult); - assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts()); - assertEquals(responseResult.failures().size(), parsedResult.failures().size()); - for (SyncedFlushResponse.ShardFailure responseShardFailure: responseResult.failures()) { - assertTrue(containsFailure(parsedResult.failures(), responseShardFailure)); - } - } - } - - static class TestPlan { - SyncedFlushResponse.ShardCounts totalCounts; - Map countsPerIndex = new HashMap<>(); - ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result; - SyncedFlushResponse clientResult; - } - - TestPlan createTestPlan() throws IOException { - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - Map indexResults = new HashMap<>(); - final XContentType xContentType = randomFrom(XContentType.values()); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccessful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - List shardFailures = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - shardFailures.add( - new SyncedFlushResponse.ShardFailure( - shardId.id(), - "simulated total failure", - new HashMap<>() - ) - ); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = - TestShardRouting.newShardRouting( - index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED - ); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new 
SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - // Building the shardRouting map here. - XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - Map routing = - shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS) - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) - .map(); - shardFailures.add( - new SyncedFlushResponse.ShardFailure( - shardId.id(), - "copy failure " + shardId, - routing - ) - ); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - indexResults.put( - index, - new SyncedFlushResponse.IndexResult( - shards * (replicas + 1), - successful, - failed, - shardFailures - ) - ); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccessful += successful; - } - testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed); - testPlan.clientResult = new SyncedFlushResponse( - new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed), - indexResults - ); - return testPlan; - } - - public boolean containsFailure(List failures, SyncedFlushResponse.ShardFailure origFailure) { - for (SyncedFlushResponse.ShardFailure failure: failures) { - if (failure.getShardId() == origFailure.getShardId() && - failure.getFailureReason().equals(origFailure.getFailureReason()) && - failure.getRouting().equals(origFailure.getRouting())) { - return true; - } - } - return false; - } - - - public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) { - if (first == null) { - assertNull(second); - } else { - assertTrue(first.equals(second)); - } - } - - public Map convertFailureListToSet(Map input) { - Map retMap = new HashMap<>(); - for (Map.Entry entry: input.entrySet()) { - if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) { - retMap.put(entry.getKey(), entry.getValue()); - } else { - // This was an index entry. 
- @SuppressWarnings("unchecked") - Map indexResult = (Map)entry.getValue(); - Map retResult = new HashMap<>(); - for (Map.Entry entry2: indexResult.entrySet()) { - if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) { - @SuppressWarnings("unchecked") - List failures = (List)entry2.getValue(); - Set retSet = new HashSet<>(failures); - retResult.put(entry.getKey(), retSet); - } else { - retResult.put(entry2.getKey(), entry2.getValue()); - } - } - retMap.put(entry.getKey(), retResult); - } - } - return retMap; - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 480091e1e4a03..506a2b52e47b1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -31,7 +31,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -56,7 +55,6 @@ import org.elasticsearch.client.GetAliasesResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.client.core.BroadcastResponse.Shards; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; import org.elasticsearch.client.indices.AnalyzeRequest; @@ -998,90 +996,6 @@ public void onFailure(Exception e) { } } - @SuppressWarnings("unused") - public void testSyncedFlushIndex() throws Exception { - RestHighLevelClient client = highLevelClient(); - - { - createIndex("index1", Settings.EMPTY); - } - - { - // tag::flush-synced-request - SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1> - SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2> - SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3> - // end::flush-synced-request - - // tag::flush-synced-request-indicesOptions - request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::flush-synced-request-indicesOptions - - // tag::flush-synced-execute - SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, RequestOptions.DEFAULT); - // end::flush-synced-execute - - // tag::flush-synced-response - int totalShards = flushSyncedResponse.totalShards(); // <1> - int successfulShards = flushSyncedResponse.successfulShards(); // <2> - int failedShards = flushSyncedResponse.failedShards(); // <3> - - for (Map.Entry responsePerIndexEntry: - flushSyncedResponse.getIndexResults().entrySet()) { - String indexName = responsePerIndexEntry.getKey(); // <4> - SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue(); - int totalShardsForIndex = indexResult.totalShards(); // <5> - int successfulShardsForIndex = indexResult.successfulShards(); // <6> - int failedShardsForIndex = indexResult.failedShards(); // <7> - if 
(failedShardsForIndex > 0) { - for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) { - int shardId = failureEntry.getShardId(); // <8> - String failureReason = failureEntry.getFailureReason(); // <9> - Map routing = failureEntry.getRouting(); // <10> - } - } - } - // end::flush-synced-response - - // tag::flush-synced-execute-listener - ActionListener listener = new ActionListener() { - @Override - public void onResponse(SyncedFlushResponse refreshResponse) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::flush-synced-execute-listener - - // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); - - // tag::flush-synced-execute-async - client.indices().flushSyncedAsync(request, RequestOptions.DEFAULT, listener); // <1> - // end::flush-synced-execute-async - - assertTrue(latch.await(30L, TimeUnit.SECONDS)); - } - - { - // tag::flush-synced-notfound - try { - SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); - client.indices().flushSynced(request, RequestOptions.DEFAULT); - } catch (ElasticsearchException exception) { - if (exception.status() == RestStatus.NOT_FOUND) { - // <1> - } - } - // end::flush-synced-notfound - } - } - public void testGetSettings() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 37ae59e9b992a..b850b2e8b9f1a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -114,6 +114,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.PutTrainedModelRequest; +import org.elasticsearch.client.ml.PutTrainedModelResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.SetUpgradeModeRequest; @@ -162,10 +164,14 @@ import org.elasticsearch.client.ml.dataframe.explain.FieldSelection; import org.elasticsearch.client.ml.dataframe.explain.MemoryEstimation; import org.elasticsearch.client.ml.filestructurefinder.FileStructure; +import org.elasticsearch.client.ml.inference.InferenceToXContentCompressor; +import org.elasticsearch.client.ml.inference.MlInferenceNamedXContentProvider; import org.elasticsearch.client.ml.inference.TrainedModelConfig; import org.elasticsearch.client.ml.inference.TrainedModelDefinition; import org.elasticsearch.client.ml.inference.TrainedModelDefinitionTests; +import org.elasticsearch.client.ml.inference.TrainedModelInput; import org.elasticsearch.client.ml.inference.TrainedModelStats; +import org.elasticsearch.client.ml.inference.trainedmodel.TargetType; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.AnalysisLimits; import org.elasticsearch.client.ml.job.config.DataDescription; @@ -186,12 +192,11 @@ import org.elasticsearch.client.ml.job.results.OverallBucket; import org.elasticsearch.client.ml.job.stats.JobStats; import 
org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -202,12 +207,10 @@ import org.junit.After; import java.io.IOException; -import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.Date; import java.util.HashMap; @@ -216,7 +219,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.zip.GZIPOutputStream; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; @@ -2973,10 +2975,11 @@ public void testPutDataFrameAnalytics() throws Exception { .setEta(5.5) // <4> .setMaximumNumberTrees(50) // <5> .setFeatureBagFraction(0.4) // <6> - .setPredictionFieldName("my_prediction_field_name") // <7> - .setTrainingPercent(50.0) // <8> - .setRandomizeSeed(1234L) // <9> - .setNumTopClasses(1) // <10> + .setNumTopFeatureImportanceValues(3) // <7> + .setPredictionFieldName("my_prediction_field_name") // <8> + .setTrainingPercent(50.0) // <9> + .setRandomizeSeed(1234L) // <10> + .setNumTopClasses(1) // <11> .build(); // end::put-data-frame-analytics-classification @@ -2987,9 +2990,10 @@ public void testPutDataFrameAnalytics() throws Exception { .setEta(5.5) // <4> .setMaximumNumberTrees(50) // <5> .setFeatureBagFraction(0.4) // <6> - .setPredictionFieldName("my_prediction_field_name") // <7> - .setTrainingPercent(50.0) // <8> - .setRandomizeSeed(1234L) // <9> + .setNumTopFeatureImportanceValues(3) // <7> + .setPredictionFieldName("my_prediction_field_name") // <8> + .setTrainingPercent(50.0) // <9> + .setRandomizeSeed(1234L) // <10> .build(); // end::put-data-frame-analytics-regression @@ -3625,6 +3629,79 @@ public void onFailure(Exception e) { } } + public void testPutTrainedModel() throws Exception { + TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder(TargetType.REGRESSION).build(); + // tag::put-trained-model-config + TrainedModelConfig trainedModelConfig = TrainedModelConfig.builder() + .setDefinition(definition) // <1> + .setCompressedDefinition(InferenceToXContentCompressor.deflate(definition)) // <2> + .setModelId("my-new-trained-model") // <3> + .setInput(new TrainedModelInput("col1", "col2", "col3", "col4")) // <4> + .setDescription("test model") // <5> + .setMetadata(new HashMap<>()) // <6> + .setTags("my_regression_models") // <7> + .build(); + // end::put-trained-model-config + + trainedModelConfig = TrainedModelConfig.builder() + .setDefinition(definition) + .setModelId("my-new-trained-model") + .setInput(new TrainedModelInput("col1", "col2", "col3", "col4")) + .setDescription("test model") + .setMetadata(new HashMap<>()) + .setTags("my_regression_models") + .build(); + + RestHighLevelClient client = highLevelClient(); + { + // tag::put-trained-model-request + PutTrainedModelRequest request = new 
PutTrainedModelRequest(trainedModelConfig); // <1> + // end::put-trained-model-request + + // tag::put-trained-model-execute + PutTrainedModelResponse response = client.machineLearning().putTrainedModel(request, RequestOptions.DEFAULT); + // end::put-trained-model-execute + + // tag::put-trained-model-response + TrainedModelConfig model = response.getResponse(); + // end::put-trained-model-response + + assertThat(model.getModelId(), equalTo(trainedModelConfig.getModelId())); + highLevelClient().machineLearning() + .deleteTrainedModel(new DeleteTrainedModelRequest("my-new-trained-model"), RequestOptions.DEFAULT); + } + { + PutTrainedModelRequest request = new PutTrainedModelRequest(trainedModelConfig); + + // tag::put-trained-model-execute-listener + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(PutTrainedModelResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-trained-model-execute-listener + + // Replace the empty listener by a blocking listener in test + CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-trained-model-execute-async + client.machineLearning().putTrainedModelAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::put-trained-model-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + highLevelClient().machineLearning() + .deleteTrainedModel(new DeleteTrainedModelRequest("my-new-trained-model"), RequestOptions.DEFAULT); + } + } + public void testGetTrainedModelsStats() throws Exception { putTrainedModel("my-trained-model"); RestHighLevelClient client = highLevelClient(); @@ -4088,57 +4165,19 @@ private DataFrameAnalyticsState getAnalyticsState(String configId) throws IOExce } private void putTrainedModel(String modelId) throws IOException { - TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder().build(); - highLevelClient().index( - new IndexRequest(".ml-inference-000001") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(modelConfigString(modelId), XContentType.JSON) - .id(modelId), - RequestOptions.DEFAULT); - - highLevelClient().index( - new IndexRequest(".ml-inference-000001") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(modelDocString(compressDefinition(definition), modelId), XContentType.JSON) - .id("trained_model_definition_doc-" + modelId + "-0"), - RequestOptions.DEFAULT); - } - - private String compressDefinition(TrainedModelDefinition definition) throws IOException { - BytesReference reference = XContentHelper.toXContent(definition, XContentType.JSON, false); - BytesStreamOutput out = new BytesStreamOutput(); - try (OutputStream compressedOutput = new GZIPOutputStream(out, 4096)) { - reference.writeTo(compressedOutput); - } - return new String(Base64.getEncoder().encode(BytesReference.toBytes(out.bytes())), StandardCharsets.UTF_8); - } - - private static String modelConfigString(String modelId) { - return "{\n" + - " \"doc_type\": \"trained_model_config\",\n" + - " \"model_id\": \"" + modelId + "\",\n" + - " \"input\":{\"field_names\":[\"col1\",\"col2\",\"col3\",\"col4\"]}," + - " \"description\": \"test model for\",\n" + - " \"version\": \"7.6.0\",\n" + - " \"license_level\": \"platinum\",\n" + - " \"created_by\": \"ml_test\",\n" + - " \"estimated_heap_memory_usage_bytes\": 0," + - " \"estimated_operations\": 0," + - " \"created_time\": 0\n" + - "}"; + TrainedModelDefinition definition = 
TrainedModelDefinitionTests.createRandomBuilder(TargetType.REGRESSION).build(); + TrainedModelConfig trainedModelConfig = TrainedModelConfig.builder() + .setDefinition(definition) + .setModelId(modelId) + .setInput(new TrainedModelInput(Arrays.asList("col1", "col2", "col3", "col4"))) + .setDescription("test model") + .build(); + highLevelClient().machineLearning().putTrainedModel(new PutTrainedModelRequest(trainedModelConfig), RequestOptions.DEFAULT); } - private static String modelDocString(String compressedDefinition, String modelId) { - return "" + - "{" + - "\"model_id\": \"" + modelId + "\",\n" + - "\"doc_num\": 0,\n" + - "\"doc_type\": \"trained_model_definition_doc\",\n" + - " \"compression_version\": " + 1 + ",\n" + - " \"total_definition_length\": " + compressedDefinition.length() + ",\n" + - " \"definition_length\": " + compressedDefinition.length() + ",\n" + - "\"definition\": \"" + compressedDefinition + "\"\n" + - "}"; + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); } private static final DataFrameAnalyticsConfig DF_ANALYTICS_CONFIG = diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index ec2043d3da296..ce1636987cce8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -694,8 +694,8 @@ public void testGetRoles() throws Exception { List roles = response.getRoles(); assertNotNull(response); - // 28 system roles plus the three we created - assertThat(roles.size(), equalTo(28 + 3)); + // 29 system roles plus the three we created + assertThat(roles.size(), equalTo(29 + 3)); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionRequestTests.java new file mode 100644 index 0000000000000..b3956c5c6afe0 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionRequestTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.client.ml.inference.TrainedModelConfigTests; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutTrainedModelActionRequestTests extends AbstractXContentTestCase { + + @Override + protected PutTrainedModelRequest createTestInstance() { + return new PutTrainedModelRequest(TrainedModelConfigTests.createTestTrainedModelConfig()); + } + + @Override + protected PutTrainedModelRequest doParseInstance(XContentParser parser) throws IOException { + return new PutTrainedModelRequest(TrainedModelConfig.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionResponseTests.java new file mode 100644 index 0000000000000..61e1638547b3f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutTrainedModelActionResponseTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.client.ml.inference.TrainedModelConfigTests; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutTrainedModelActionResponseTests extends AbstractXContentTestCase { + + @Override + protected PutTrainedModelResponse createTestInstance() { + return new PutTrainedModelResponse(TrainedModelConfigTests.createTestTrainedModelConfig()); + } + + @Override + protected PutTrainedModelResponse doParseInstance(XContentParser parser) throws IOException { + return new PutTrainedModelResponse(TrainedModelConfig.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/ClassificationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/ClassificationTests.java index 5ef8fdaef5a27..79d78c888880f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/ClassificationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/ClassificationTests.java @@ -32,6 +32,7 @@ public static Classification randomClassification() { .setEta(randomBoolean() ? null : randomDoubleBetween(0.001, 1.0, true)) .setMaximumNumberTrees(randomBoolean() ? null : randomIntBetween(1, 2000)) .setFeatureBagFraction(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false)) + .setNumTopFeatureImportanceValues(randomBoolean() ? null : randomIntBetween(0, Integer.MAX_VALUE)) .setPredictionFieldName(randomBoolean() ? null : randomAlphaOfLength(10)) .setTrainingPercent(randomBoolean() ? null : randomDoubleBetween(1.0, 100.0, true)) .setRandomizeSeed(randomBoolean() ? null : randomLong()) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java index 02e41ecdff333..eedffb4740d78 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/RegressionTests.java @@ -32,6 +32,7 @@ public static Regression randomRegression() { .setEta(randomBoolean() ? null : randomDoubleBetween(0.001, 1.0, true)) .setMaximumNumberTrees(randomBoolean() ? null : randomIntBetween(1, 2000)) .setFeatureBagFraction(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false)) + .setNumTopFeatureImportanceValues(randomBoolean() ? null : randomIntBetween(0, Integer.MAX_VALUE)) .setPredictionFieldName(randomBoolean() ? null : randomAlphaOfLength(10)) .setTrainingPercent(randomBoolean() ? 
null : randomDoubleBetween(1.0, 100.0, true)) .build(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressorTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressorTests.java new file mode 100644 index 0000000000000..11747638a2c15 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/InferenceToXContentCompressorTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.inference; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class InferenceToXContentCompressorTests extends ESTestCase { + + public void testInflateAndDeflate() throws IOException { + for(int i = 0; i < 10; i++) { + TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder().build(); + String firstDeflate = InferenceToXContentCompressor.deflate(definition); + TrainedModelDefinition inflatedDefinition = InferenceToXContentCompressor.inflate(firstDeflate, + parser -> TrainedModelDefinition.fromXContent(parser).build(), + xContentRegistry()); + + // Did we inflate to the same object? 
+ assertThat(inflatedDefinition, equalTo(definition)); + } + } + + public void testInflateTooLargeStream() throws IOException { + TrainedModelDefinition definition = TrainedModelDefinitionTests.createRandomBuilder().build(); + String firstDeflate = InferenceToXContentCompressor.deflate(definition); + BytesReference inflatedBytes = InferenceToXContentCompressor.inflate(firstDeflate, 10L); + assertThat(inflatedBytes.length(), equalTo(10)); + try(XContentParser parser = XContentHelper.createParser(xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + inflatedBytes, + XContentType.JSON)) { + expectThrows(IOException.class, () -> TrainedModelConfig.fromXContent(parser)); + } + } + + public void testInflateGarbage() { + expectThrows(IOException.class, () -> InferenceToXContentCompressor.inflate(randomAlphaOfLength(10), 100L)); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelConfigTests.java index 95ebbad837d69..43ab2e5993fde 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelConfigTests.java @@ -37,6 +37,24 @@ public class TrainedModelConfigTests extends AbstractXContentTestCase { + public static TrainedModelConfig createTestTrainedModelConfig() { + return new TrainedModelConfig( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + Version.CURRENT, + randomBoolean() ? null : randomAlphaOfLength(100), + Instant.ofEpochMilli(randomNonNegativeLong()), + randomBoolean() ? null : TrainedModelDefinitionTests.createRandomBuilder().build(), + randomBoolean() ? null : randomAlphaOfLength(100), + randomBoolean() ? null : + Stream.generate(() -> randomAlphaOfLength(10)).limit(randomIntBetween(0, 5)).collect(Collectors.toList()), + randomBoolean() ? null : Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)), + randomBoolean() ? null : TrainedModelInputTests.createRandomInput(), + randomBoolean() ? null : randomNonNegativeLong(), + randomBoolean() ? null : randomNonNegativeLong(), + randomBoolean() ? null : randomFrom("platinum", "basic")); + } + @Override protected TrainedModelConfig doParseInstance(XContentParser parser) throws IOException { return TrainedModelConfig.fromXContent(parser); @@ -54,22 +72,7 @@ protected Predicate getRandomFieldsExcludeFilter() { @Override protected TrainedModelConfig createTestInstance() { - return new TrainedModelConfig( - randomAlphaOfLength(10), - randomAlphaOfLength(10), - Version.CURRENT, - randomBoolean() ? null : randomAlphaOfLength(100), - Instant.ofEpochMilli(randomNonNegativeLong()), - randomBoolean() ? null : TrainedModelDefinitionTests.createRandomBuilder().build(), - randomBoolean() ? null : randomAlphaOfLength(100), - randomBoolean() ? null : - Stream.generate(() -> randomAlphaOfLength(10)).limit(randomIntBetween(0, 5)).collect(Collectors.toList()), - randomBoolean() ? null : Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)), - randomBoolean() ? null : TrainedModelInputTests.createRandomInput(), - randomBoolean() ? null : randomNonNegativeLong(), - randomBoolean() ? 
null : randomNonNegativeLong(), - randomBoolean() ? null : randomFrom("platinum", "basic")); - + return createTestTrainedModelConfig(); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/EnsembleTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/EnsembleTests.java index f2448cbf4c8bb..9f15a3f8a31b3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/EnsembleTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/ensemble/EnsembleTests.java @@ -67,22 +67,32 @@ public static Ensemble createRandom(TargetType targetType) { .collect(Collectors.toList()); int numberOfModels = randomIntBetween(1, 10); List models = Stream.generate(() -> TreeTests.buildRandomTree(featureNames, 6, targetType)) - .limit(numberOfFeatures) + .limit(numberOfModels) .collect(Collectors.toList()); - OutputAggregator outputAggregator = null; - if (randomBoolean()) { - List weights = Stream.generate(ESTestCase::randomDouble).limit(numberOfModels).collect(Collectors.toList()); - outputAggregator = randomFrom(new WeightedMode(weights), new WeightedSum(weights), new LogisticRegression(weights)); + List weights = Stream.generate(ESTestCase::randomDouble).limit(numberOfModels).collect(Collectors.toList()); + List possibleAggregators = new ArrayList<>(Arrays.asList(new WeightedMode(weights), + new LogisticRegression(weights))); + if (targetType.equals(TargetType.REGRESSION)) { + possibleAggregators.add(new WeightedSum(weights)); } + OutputAggregator outputAggregator = randomFrom(possibleAggregators.toArray(new OutputAggregator[0])); List categoryLabels = null; - if (randomBoolean()) { + if (randomBoolean() && targetType.equals(TargetType.CLASSIFICATION)) { categoryLabels = Arrays.asList(generateRandomStringArray(randomIntBetween(1, 10), randomIntBetween(1, 10), false, false)); } + double[] thresholds = randomBoolean() && targetType == TargetType.CLASSIFICATION ? + Stream.generate(ESTestCase::randomDouble) + .limit(categoryLabels == null ? 
randomIntBetween(1, 10) : categoryLabels.size()) + .mapToDouble(Double::valueOf) + .toArray() : + null; + return new Ensemble(featureNames, models, outputAggregator, targetType, - categoryLabels); + categoryLabels, + thresholds); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/tree/TreeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/tree/TreeTests.java index febd1b98c2765..57cb2ba664d77 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/tree/TreeTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/trainedmodel/tree/TreeTests.java @@ -84,7 +84,7 @@ public static Tree buildRandomTree(List featureNames, int depth, TargetT childNodes = nextNodes; } List categoryLabels = null; - if (randomBoolean()) { + if (randomBoolean() && targetType.equals(TargetType.CLASSIFICATION)) { categoryLabels = Arrays.asList(generateRandomStringArray(randomIntBetween(1, 10), randomIntBetween(1, 10), false, false)); } return builder.setClassificationLabels(categoryLabels) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index c5a159eb39da5..7abe24aa59eb5 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -116,6 +116,86 @@ public HttpEntity getEntity() { "GMT" + // GMT "\")?"); // closing quote (optional, since an older version can still send a warn-date) + /** + * Optimized regular expression to test if a string matches the RFC 1123 date + * format (with quotes and leading space). Start/end of line characters and + * atomic groups are used to prevent backtracking. + */ + private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile( + "^ " + // start of line, leading space + // quoted RFC 1123 date format + "\"" + // opening quote + "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking + "\\d{2} " + // 2-digit day + "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking + "\\d{4} " + // 4-digit year + "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) + "GMT" + // GMT + "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line + + /** + * Length of RFC 1123 format (with quotes and leading space), used in + * matchWarningHeaderPatternByPrefix(String). + */ + private static final int WARNING_HEADER_DATE_LENGTH = 0 + + 1 + + 1 + + 3 + 1 + 1 + + 2 + 1 + + 3 + 1 + + 4 + 1 + + 2 + 1 + 2 + 1 + 2 + 1 + + 3 + + 1; + + /** + * Tests if a string matches the RFC 7234 specification for warning headers. + * This assumes that the warn code is always 299 and the warn agent is always + * Elasticsearch. + * + * @param s the value of a warning header formatted according to RFC 7234 + * @return {@code true} if the input string matches the specification + */ + private static boolean matchWarningHeaderPatternByPrefix(final String s) { + return s.startsWith("299 Elasticsearch-"); + } + + /** + * Refer to org.elasticsearch.common.logging.DeprecationLogger + */ + private static String extractWarningValueFromWarningHeader(final String s) { + String warningHeader = s; + + /* + * The following block tests for the existence of a RFC 1123 date in the warning header. 
If the date exists, it is removed for + * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). + */ + if (s.length() > WARNING_HEADER_DATE_LENGTH) { + final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); + final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); + + if (matcher.matches()) { + warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); + } + } + + final int firstQuote = warningHeader.indexOf('\"'); + final int lastQuote = warningHeader.length() - 1; + final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); + assert assertWarningValue(s, warningValue); + return warningValue; + } + + /** + * Refer to org.elasticsearch.common.logging.DeprecationLogger + */ + private static boolean assertWarningValue(final String s, final String warningValue) { + final Matcher matcher = WARNING_HEADER_PATTERN.matcher(s); + final boolean matches = matcher.matches(); + assert matches; + return matcher.group(1).equals(warningValue); + } + /** * Returns a list of all warning headers returned in the response. */ @@ -123,9 +203,8 @@ public List getWarnings() { List warnings = new ArrayList<>(); for (Header header : response.getHeaders("Warning")) { String warning = header.getValue(); - final Matcher matcher = WARNING_HEADER_PATTERN.matcher(warning); - if (matcher.matches()) { - warnings.add(matcher.group(1)); + if (matchWarningHeaderPatternByPrefix(warning)) { + warnings.add(extractWarningValueFromWarningHeader(warning)); } else { warnings.add(warning); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index dd133f90daadb..fa5e9bcc6b43c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -424,6 +424,7 @@ public void testHeaders() throws Exception { public void testDeprecationWarnings() throws Exception { String chars = randomAsciiAlphanumOfLength(5); assertDeprecationWarnings(singletonList("poorly formatted " + chars), singletonList("poorly formatted " + chars)); + assertDeprecationWarnings(singletonList(formatWarningWithoutDate(chars)), singletonList(chars)); assertDeprecationWarnings(singletonList(formatWarning(chars)), singletonList(chars)); assertDeprecationWarnings( Arrays.asList(formatWarning(chars), "another one", "and another"), @@ -433,6 +434,9 @@ public void testDeprecationWarnings() throws Exception { Arrays.asList("ignorable one", "and another")); assertDeprecationWarnings(singletonList("exact"), singletonList("exact")); assertDeprecationWarnings(Collections.emptyList(), Collections.emptyList()); + + String proxyWarning = "112 - \"network down\" \"Sat, 25 Aug 2012 23:34:45 GMT\""; + assertDeprecationWarnings(singletonList(proxyWarning), singletonList(proxyWarning)); } private enum DeprecationWarningOption { @@ -518,9 +522,13 @@ private void assertDeprecationWarnings(List warningHeaderTexts, List&2 + FILE_PERMS="$(stat -L -c '%a' ${!VAR_NAME_FILE})" + + if [[ "$FILE_PERMS" != "400" && "$FILE_PERMS" != "600" ]]; then + if [[ -h "${!VAR_NAME_FILE}" ]]; then + echo "ERROR: File $(readlink "${!VAR_NAME_FILE}") (target of symlink ${!VAR_NAME_FILE} from $VAR_NAME_FILE) must have file permissions 400 or 600, but actually has: $FILE_PERMS" >&2 + else + echo "ERROR: File ${!VAR_NAME_FILE} from 
$VAR_NAME_FILE must have file permissions 400 or 600, but actually has: $FILE_PERMS" >&2 + fi exit 1 fi diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index f404f941168d7..f2de4fd3b1763 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -81,6 +81,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -206,24 +207,65 @@ protected void printAdditionalHelp(Terminal terminal) { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - String pluginId = arguments.value(options); + List pluginId = arguments.values(options); final boolean isBatch = options.has(batchOption); execute(terminal, pluginId, isBatch, env); } // pkg private for testing - void execute(Terminal terminal, String pluginId, boolean isBatch, Environment env) throws Exception { - if (pluginId == null) { - throw new UserException(ExitCodes.USAGE, "plugin id is required"); + void execute(Terminal terminal, List pluginIds, boolean isBatch, Environment env) throws Exception { + if (pluginIds.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "at least one plugin id is required"); } - if ("x-pack".equals(pluginId)) { - handleInstallXPack(buildFlavor()); + final Set uniquePluginIds = new HashSet<>(); + for (final String pluginId : pluginIds) { + if (uniquePluginIds.add(pluginId) == false) { + throw new UserException(ExitCodes.USAGE, "duplicate plugin id [" + pluginId + "]"); + } } - Path pluginZip = download(terminal, pluginId, env.tmpFile(), isBatch); - Path extractedZip = unzip(pluginZip, env.pluginsFile()); - install(terminal, isBatch, extractedZip, env); + final Map> deleteOnFailures = new LinkedHashMap<>(); + for (final String pluginId : pluginIds) { + terminal.println("-> Installing " + pluginId); + try { + if ("x-pack".equals(pluginId)) { + handleInstallXPack(buildFlavor()); + } + + final List deleteOnFailure = new ArrayList<>(); + deleteOnFailures.put(pluginId, deleteOnFailure); + + final Path pluginZip = download(terminal, pluginId, env.tmpFile(), isBatch); + final Path extractedZip = unzip(pluginZip, env.pluginsFile()); + deleteOnFailure.add(extractedZip); + final PluginInfo pluginInfo = installPlugin(terminal, isBatch, extractedZip, env, deleteOnFailure); + terminal.println("-> Installed " + pluginInfo.getName()); + // swap the entry by plugin id for one with the installed plugin name, it gives a cleaner error message for URL installs + deleteOnFailures.remove(pluginId); + deleteOnFailures.put(pluginInfo.getName(), deleteOnFailure); + } catch (final Exception installProblem) { + terminal.println("-> Failed installing " + pluginId); + for (final Map.Entry> deleteOnFailureEntry : deleteOnFailures.entrySet()) { + terminal.println("-> Rolling back " + deleteOnFailureEntry.getKey()); + boolean success = false; + try { + IOUtils.rm(deleteOnFailureEntry.getValue().toArray(new Path[0])); + success = true; + } catch (final IOException exceptionWhileRemovingFiles) { + final Exception exception = new Exception( + "failed rolling back installation of [" + deleteOnFailureEntry.getKey() + "]", + exceptionWhileRemovingFiles); + 
installProblem.addSuppressed(exception); + terminal.println("-> Failed rolling back " + deleteOnFailureEntry.getKey()); + } + if (success) { + terminal.println("-> Rolled back " + deleteOnFailureEntry.getKey()); + } + } + throw installProblem; + } + } } Build.Flavor buildFlavor() { @@ -773,26 +815,11 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir, // TODO: verify the classname exists in one of the jars! } - private void install(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env) throws Exception { - List deleteOnFailure = new ArrayList<>(); - deleteOnFailure.add(tmpRoot); - try { - installPlugin(terminal, isBatch, tmpRoot, env, deleteOnFailure); - } catch (Exception installProblem) { - try { - IOUtils.rm(deleteOnFailure.toArray(new Path[0])); - } catch (IOException exceptionWhileRemovingFiles) { - installProblem.addSuppressed(exceptionWhileRemovingFiles); - } - throw installProblem; - } - } - /** * Installs the plugin from {@code tmpRoot} into the plugins dir. * If the plugin has a bin dir and/or a config dir, those are moved. */ - private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, + private PluginInfo installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { final PluginInfo info = loadPluginInfo(terminal, tmpRoot, env); // read optional security policy (extra permissions), if it exists, confirm or warn the user @@ -811,7 +838,7 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, installPluginSupportFiles(info, tmpRoot, env.binFile().resolve(info.getName()), env.configFile().resolve(info.getName()), deleteOnFailure); movePlugin(tmpRoot, destination); - terminal.println("-> Installed " + info.getName()); + return info; } /** Moves bin and config directories from the plugin if they exist */ diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 45fbd3133d175..6822f07d4c10f 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -64,6 +64,7 @@ import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; @@ -280,9 +281,17 @@ void installPlugin(String pluginUrl, Path home) throws Exception { installPlugin(pluginUrl, home, skipJarHellCommand); } + void installPlugins(final List pluginUrls, final Path home) throws Exception { + installPlugins(pluginUrls, home, skipJarHellCommand); + } + void installPlugin(String pluginUrl, Path home, InstallPluginCommand command) throws Exception { - Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - command.execute(terminal, pluginUrl, false, env); + installPlugins(pluginUrl == null ? 
List.of() : List.of(pluginUrl), home, command); + } + + void installPlugins(final List pluginUrls, final Path home, final InstallPluginCommand command) throws Exception { + final Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); + command.execute(terminal, pluginUrls, false, env); } void assertPlugin(String name, Path original, Environment env) throws IOException { @@ -382,7 +391,7 @@ void assertInstallCleaned(Environment env) throws IOException { public void testMissingPluginId() throws IOException { final Tuple env = createEnv(fs, temp); final UserException e = expectThrows(UserException.class, () -> installPlugin(null, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("plugin id is required")); + assertTrue(e.getMessage(), e.getMessage().contains("at least one plugin id is required")); } public void testSomethingWorks() throws Exception { @@ -393,6 +402,37 @@ public void testSomethingWorks() throws Exception { assertPlugin("fake", pluginDir, env.v2()); } + public void testMultipleWorks() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String fake1PluginZip = createPluginUrl("fake1", pluginDir); + String fake2PluginZip = createPluginUrl("fake2", pluginDir); + installPlugins(List.of(fake1PluginZip, fake2PluginZip), env.v1()); + assertPlugin("fake1", pluginDir, env.v2()); + assertPlugin("fake2", pluginDir, env.v2()); + } + + public void testDuplicateInstall() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPluginUrl("fake", pluginDir); + final UserException e = expectThrows(UserException.class, () -> installPlugins(List.of(pluginZip, pluginZip), env.v1())); + assertThat(e, hasToString(containsString("duplicate plugin id [" + pluginZip + "]"))); + } + + public void testTransaction() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPluginUrl("fake", pluginDir); + final FileNotFoundException e = + expectThrows(FileNotFoundException.class, () -> installPlugins(List.of(pluginZip, pluginZip + "does-not-exist"), env.v1())); + assertThat(e, hasToString(containsString("does-not-exist"))); + final Path fakeInstallPath = env.v2().pluginsFile().resolve("fake"); + // fake should have been removed when the file not found exception occurred + assertFalse(Files.exists(fakeInstallPath)); + assertInstallCleaned(env.v2()); + } + public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); @@ -769,7 +809,7 @@ Build.Flavor buildFlavor() { }; final Environment environment = createEnv(fs, temp).v2(); - final T exception = expectThrows(clazz, () -> flavorCommand.execute(terminal, "x-pack", false, environment)); + final T exception = expectThrows(clazz, () -> flavorCommand.execute(terminal, List.of("x-pack"), false, environment)); assertThat(exception, hasToString(containsString(expectedMessage))); } @@ -830,7 +870,7 @@ private void installPlugin(MockTerminal terminal, boolean isBatch) throws Except writePluginSecurityPolicy(pluginDir, "setFactory"); } String pluginZip = createPlugin("fake", pluginDir).toUri().toURL().toString(); - skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2()); + skipJarHellCommand.execute(terminal, List.of(pluginZip), isBatch, env.v2()); } void assertInstallPluginFromUrl( diff --git a/docs/community-clients/index.asciidoc 
b/docs/community-clients/index.asciidoc index d931a33a937f7..8cd0609730d50 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -4,6 +4,15 @@ == Preface :client: https://www.elastic.co/guide/en/elasticsearch/client +[NOTE] +==== +This is a list of clients submitted by members of the Elastic community. +Elastic does not support or endorse these clients. + +If you'd like to add a new client to this list, please +https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md#contributing-code-and-documentation-changes[open a pull request]. +==== + Besides the link:/guide[officially supported Elasticsearch clients], there are a number of clients that have been contributed by the community for various languages: @@ -41,9 +50,9 @@ a number of clients that have been contributed by the community for various lang [[clojure]] == Clojure -* https://github.com/mpenet/spandex[Spandex]: +* https://github.com/mpenet/spandex[Spandex]: Clojure client, based on the new official low level rest-client. - + * http://github.com/clojurewerkz/elastisch[Elastisch]: Clojure client. @@ -64,6 +73,8 @@ a number of clients that have been contributed by the community for various lang http://github.com/karmi/tire[Tire]. Ready to use in pure Erlang environment. +* https://github.com/sashman/elasticsearch_elixir_bulk_processor[Elixir Bulk Processor]: + Dynamically configurable Elixir port of the [Bulk Processor](https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html). Implemented using GenStages to handle backpressure. [[go]] == Go @@ -75,7 +86,7 @@ Also see the {client}/go-api/current/index.html[official Elasticsearch Go client * https://github.com/olivere/elastic[elastic]: Elasticsearch client for Google Go. - + * https://github.com/softctrl/elk[elk] Golang lib for Elasticsearch client. @@ -106,8 +117,8 @@ Also see the {client}/javascript-api/current/index.html[official Elasticsearch J * https://github.com/mbuhot/eskotlin[ES Kotlin]: Elasticsearch Query DSL for kotlin based on the {client}/java-api/current/index.html[official Elasticsearch Java client]. - -* https://github.com/jillesvangurp/es-kotlin-wrapper-client[ES Kotlin Wrapper Client]: + +* https://github.com/jillesvangurp/es-kotlin-wrapper-client[ES Kotlin Wrapper Client]: Kotlin extension functions and abstractions for the {client}/java-api/current/index.html[official Elasticsearch Highlevel Client]. Aims to reduce the amount of boilerplate needed to do searches, bulk indexing and other common things users do with the client. [[lua]] @@ -154,7 +165,7 @@ Also see the {client}/python-api/current/index.html[official Elasticsearch Pytho * https://github.com/ropensci/elasticdsl[elasticdsl]: A high-level R DSL for Elasticsearch, wrapping the elastic R client. - + * https://github.com/uptake/uptasticsearch[uptasticsearch]: An R client tailored to data science workflows. @@ -178,12 +189,14 @@ Also see the {client}/ruby-api/current/index.html[official Elasticsearch Ruby cl [[rust]] == Rust +Also see the {client}/rust-api/current/index.html[official Elasticsearch Rust client]. + * https://github.com/benashford/rs-es[rs-es]: A REST API client with a strongly-typed Query DSL. * https://github.com/elastic-rs/elastic[elastic]: A modular REST API client that supports freeform queries. 
- + [[scala]] == Scala @@ -192,7 +205,7 @@ Also see the {client}/ruby-api/current/index.html[official Elasticsearch Ruby cl * https://github.com/gphat/wabisabi[wabisabi]: Asynchronous REST API Scala client. - + * https://github.com/workday/escalar[escalar]: Type-safe Scala wrapper for the REST API. diff --git a/docs/java-rest/high-level/indices/flush_synced.asciidoc b/docs/java-rest/high-level/indices/flush_synced.asciidoc deleted file mode 100644 index e5dfa59153b09..0000000000000 --- a/docs/java-rest/high-level/indices/flush_synced.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ --- -:api: flush-synced -:request: SyncedFlushRequest -:response: SyncedFlushResponse --- - -[id="{upid}-{api}"] -=== Flush Synced API - -[id="{upid}-{api}-request"] -==== Flush Synced Request - -A +{request}+ can be applied to one or more indices, or even on `_all` the indices: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Flush synced one index -<2> Flush synced multiple indices -<3> Flush synced all the indices - -==== Optional arguments - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-indicesOptions] --------------------------------------------------- -<1> Setting `IndicesOptions` controls how unavailable indices are resolved and -how wildcard expressions are expanded - -include::../execution.asciidoc[] - -[id="{upid}-{api}-response"] -==== Flush Synced Response - -The returned +{response}+ allows to retrieve information about the -executed operation as follows: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- -<1> Total number of shards hit by the flush request -<2> Number of shards where the flush has succeeded -<3> Number of shards where the flush has failed -<4> Name of the index whose results we are about to calculate. -<5> Total number of shards for index mentioned in 4. -<6> Successful shards for index mentioned in 4. -<7> Failed shards for index mentioned in 4. -<8> One of the failed shard ids of the failed index mentioned in 4. -<9> Reason for failure of copies of the shard mentioned in 8. -<10> JSON represented by a Map. Contains shard related information like id, state, version etc. -for the failed shard copies. If the entire shard failed then this returns an empty map. - -By default, if the indices were not found, an `ElasticsearchException` will be thrown: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-notfound] --------------------------------------------------- -<1> Do something if the indices to be flushed were not found diff --git a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc index 2152eff5c0850..4be2011340210 100644 --- a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc @@ -117,10 +117,11 @@ include-tagged::{doc-tests-file}[{api}-classification] <4> The applied shrinkage. A double in [0.001, 1]. <5> The maximum number of trees the forest is allowed to contain. An integer in [1, 2000]. 
<6> The fraction of features which will be used when selecting a random bag for each candidate split. A double in (0, 1]. -<7> The name of the prediction field in the results object. -<8> The percentage of training-eligible rows to be used in training. Defaults to 100%. -<9> The seed to be used by the random generator that picks which rows are used in training. -<10> The number of top classes to be reported in the results. Defaults to 2. +<7> If set, feature importance for the top most important features will be computed. +<8> The name of the prediction field in the results object. +<9> The percentage of training-eligible rows to be used in training. Defaults to 100%. +<10> The seed to be used by the random generator that picks which rows are used in training. +<11> The number of top classes to be reported in the results. Defaults to 2. ===== Regression @@ -137,9 +138,10 @@ include-tagged::{doc-tests-file}[{api}-regression] <4> The applied shrinkage. A double in [0.001, 1]. <5> The maximum number of trees the forest is allowed to contain. An integer in [1, 2000]. <6> The fraction of features which will be used when selecting a random bag for each candidate split. A double in (0, 1]. -<7> The name of the prediction field in the results object. -<8> The percentage of training-eligible rows to be used in training. Defaults to 100%. -<9> The seed to be used by the random generator that picks which rows are used in training. +<7> If set, feature importance for the top most important features will be computed. +<8> The name of the prediction field in the results object. +<9> The percentage of training-eligible rows to be used in training. Defaults to 100%. +<10> The seed to be used by the random generator that picks which rows are used in training. ==== Analyzed fields diff --git a/docs/java-rest/high-level/ml/put-trained-model.asciidoc b/docs/java-rest/high-level/ml/put-trained-model.asciidoc new file mode 100644 index 0000000000000..dadc8dcf65a4f --- /dev/null +++ b/docs/java-rest/high-level/ml/put-trained-model.asciidoc @@ -0,0 +1,53 @@ +-- +:api: put-trained-model +:request: PutTrainedModelRequest +:response: PutTrainedModelResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Put Trained Model API + +Creates a new trained model for inference. +The API accepts a +{request}+ object as a request and returns a +{response}+. + +[id="{upid}-{api}-request"] +==== Put Trained Model request + +A +{request}+ requires the following argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The configuration of the {infer} Trained Model to create + +[id="{upid}-{api}-config"] +==== Trained Model configuration + +The `TrainedModelConfig` object contains all the details about the trained model +configuration and contains the following arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-config] +-------------------------------------------------- +<1> The {infer} definition for the model +<2> Optionally, if the {infer} definition is large, you may choose to compress it for transport. + Do not supply both the compressed and uncompressed definitions. 
+<3> The unique model id +<4> The input field names for the model definition +<5> Optionally, a human-readable description +<6> Optionally, an object map containing metadata about the model +<7> Optionally, an array of tags to organize the model + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the newly created trained model. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index e0d228b5d1e4b..3e2ed5ce6cb38 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -102,7 +102,6 @@ Index Management:: * <<{upid}-clone-index>> * <<{upid}-refresh>> * <<{upid}-flush>> -* <<{upid}-flush-synced>> * <<{upid}-clear-cache>> * <<{upid}-force-merge>> * <<{upid}-rollover-index>> @@ -138,7 +137,6 @@ include::indices/split_index.asciidoc[] include::indices/clone_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] -include::indices/flush_synced.asciidoc[] include::indices/clear_cache.asciidoc[] include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] @@ -304,6 +302,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-evaluate-data-frame>> * <<{upid}-explain-data-frame-analytics>> * <<{upid}-get-trained-models>> +* <<{upid}-put-trained-model>> * <<{upid}-get-trained-models-stats>> * <<{upid}-delete-trained-model>> * <<{upid}-put-filter>> @@ -359,6 +358,7 @@ include::ml/stop-data-frame-analytics.asciidoc[] include::ml/evaluate-data-frame.asciidoc[] include::ml/explain-data-frame-analytics.asciidoc[] include::ml/get-trained-models.asciidoc[] +include::ml/put-trained-model.asciidoc[] include::ml/get-trained-models-stats.asciidoc[] include::ml/delete-trained-model.asciidoc[] include::ml/put-filter.asciidoc[] diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index f7906a0be50ec..37bc8b6e66557 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -106,6 +106,32 @@ sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elast ----------------------------------- -- +[[installing-multiple-plugins]] +=== Installing multiple plugins + +Multiple plugins can be installed in one invocation as follows: + +[source,shell] +----------------------------------- +sudo bin/elasticsearch-plugin install [plugin_id] [plugin_id] ... [plugin_id] +----------------------------------- + +Each `plugin_id` can be any valid form for installing a single plugin (e.g., the +name of a core plugin, or a custom URL). + +For instance, to install the core <>, and +<> run the following command: + +[source,shell] +----------------------------------- +sudo bin/elasticsearch-plugin install analysis-icu repository-s3 +----------------------------------- + +This command will install the versions of the plugins that match your +Elasticsearch version. The installation will be treated as a transaction, so +that all the plugins will be installed, or none of the plugins will be installed +if any installation fails. 
+ [[mandatory-plugins]] === Mandatory Plugins diff --git a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc index 59033843078b1..8d4652aebe2c5 100644 --- a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc @@ -191,6 +191,62 @@ var bbox = geohash.decode_bbox('u17'); -------------------------------------------------- // NOTCONSOLE +==== Requests with additional bounding box filtering + +The `geohash_grid` aggregation supports an optional `bounds` parameter +that restricts the points considered to those that fall within the +bounds provided. The `bounds` parameter accepts the bounding box in +all the same <> of the +bounds specified in the Geo Bounding Box Query. This bounding box can be used with or +without an additional `geo_bounding_box` query filtering the points prior to aggregating. +It is an independent bounding box that can intersect with, be equal to, or be disjoint +to any additional `geo_bounding_box` queries defined in the context of the aggregation. + +[source,console,id=geohashgrid-aggregation-with-bounds] +-------------------------------------------------- +POST /museums/_search?size=0 +{ + "aggregations" : { + "tiles-in-bounds" : { + "geohash_grid" : { + "field" : "location", + "precision" : 8, + "bounds": { + "top_left" : "53.4375, 4.21875", + "bottom_right" : "52.03125, 5.625" + } + } + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +-------------------------------------------------- +{ + ... + "aggregations" : { + "tiles-in-bounds" : { + "buckets" : [ + { + "key" : "u173zy3j", + "doc_count" : 1 + }, + { + "key" : "u173zvfz", + "doc_count" : 1 + }, + { + "key" : "u173zt90", + "doc_count" : 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] ==== Cell dimensions at the equator The table below shows the metric dimensions for cells covered by various string lengths of geohash. @@ -230,6 +286,8 @@ precision:: Optional. The string length of the geohashes used to define to precision levels higher than the supported 12 levels, (e.g. for distances <5.6cm) the value is rejected. +bounds: Optional. The bounding box to filter the points in the bucket. + size:: Optional. The maximum number of geohash buckets to return (defaults to 10,000). When results are trimmed, buckets are prioritised based on the volumes of documents they contain. diff --git a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc index c60b14b0f2687..25cc1a977c86b 100644 --- a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc @@ -162,6 +162,62 @@ POST /museums/_search?size=0 -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] +==== Requests with additional bounding box filtering + +The `geotile_grid` aggregation supports an optional `bounds` parameter +that restricts the points considered to those that fall within the +bounds provided. The `bounds` parameter accepts the bounding box in +all the same <> of the +bounds specified in the Geo Bounding Box Query. 
This bounding box can be used with or +without an additional `geo_bounding_box` query filtering the points prior to aggregating. +It is an independent bounding box that can intersect with, be equal to, or be disjoint +to any additional `geo_bounding_box` queries defined in the context of the aggregation. + +[source,console,id=geotilegrid-aggregation-with-bounds] +-------------------------------------------------- +POST /museums/_search?size=0 +{ + "aggregations" : { + "tiles-in-bounds" : { + "geotile_grid" : { + "field" : "location", + "precision" : 22, + "bounds": { + "top_left" : "52.4, 4.9", + "bottom_right" : "52.3, 5.0" + } + } + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +-------------------------------------------------- +{ + ... + "aggregations" : { + "tiles-in-bounds" : { + "buckets" : [ + { + "key" : "22/2154412/1378379", + "doc_count" : 1 + }, + { + "key" : "22/2154385/1378332", + "doc_count" : 1 + }, + { + "key" : "22/2154259/1378425", + "doc_count" : 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] ==== Options @@ -172,6 +228,8 @@ precision:: Optional. The integer zoom of the key used to define cells/buckets in the results. Defaults to 7. Values outside of [0,29] will be rejected. +bounds: Optional. The bounding box to filter the points in the bucket. + size:: Optional. The maximum number of geohash buckets to return (defaults to 10,000). When results are trimmed, buckets are prioritised based on the volumes of documents they contain. diff --git a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc index c41df5095c8f2..3c7f429c97bc3 100644 --- a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc @@ -49,7 +49,7 @@ The filter produces the following tokens: [source,text] -------------------------------------------------- -[ t, th, q, ui, b, br, f, fo, j, ju ] +[ t, th, q, qu, b, br, f, fo, j, ju ] -------------------------------------------------- ///////////////////// diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 50d3028101dd0..1d6601ed3b7b1 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -102,19 +102,30 @@ order is important, you can change it. Say you want to find the largest index in your cluster (storage used by all the shards, not number of documents). The `/_cat/indices` API -is ideal. We only need to tweak two things. First, we want to turn -off human mode. We'll use a byte-level resolution. Then we'll pipe -our output into `sort` using the appropriate column, which in this -case is the eighth one. +is ideal. You only need to add three things to the API request: -[source,sh] +. The `bytes` query string parameter with a value of `b` to get byte-level resolution. +. The `s` (sort) parameter with a value of `store.size:desc` to sort the output +by shard storage in descending order. +. The `v` (verbose) parameter to include column headings in the response. 
+ +[source,console] -------------------------------------------------- -% curl '192.168.56.10:9200/_cat/indices?bytes=b' | sort -rnk8 -green wiki2 3 0 10000 0 105274918 105274918 -green wiki1 3 0 10000 413 103776272 103776272 -green foo 1 0 227 0 2065131 2065131 +GET /_cat/indices?bytes=b&s=store.size:desc&v -------------------------------------------------- -// NOTCONSOLE +// TEST[setup:huge_twitter] +// TEST[s/^/PUT twitter2\n{"settings": {"number_of_replicas": 0}}\n/] + +The API returns the following response: + +[source,txt] +-------------------------------------------------- +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 72171 72171 +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 230 230 +-------------------------------------------------- +// TESTRESPONSE[s/72171|230/\\d+/] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ non_json] If you want to change the <>, use `time` parameter. diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index b26472812f514..e2f362f8e7fbf 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -239,7 +239,7 @@ Time (UTC)]. `unassigned.details`, `ud`:: Details about why the shard became unassigned. -`unassigned.for`, `ua`:: +`unassigned.for`, `uf`:: Time at which the shard was requested to be unassigned in https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal Time (UTC)]. @@ -388,4 +388,4 @@ twitter 0 r STARTED 3014 31.1mb 192.168.56.30 bGG90GE twitter 0 r STARTED 3014 31.1mb 192.168.56.20 I8hydUG twitter 0 r UNASSIGNED ALLOCATION_FAILED --------------------------------------------------------------------------- -// TESTRESPONSE[non_json] \ No newline at end of file +// TESTRESPONSE[non_json] diff --git a/docs/reference/ccr/requirements.asciidoc b/docs/reference/ccr/requirements.asciidoc index 2e92d75707ba5..5e3b188849aa6 100644 --- a/docs/reference/ccr/requirements.asciidoc +++ b/docs/reference/ccr/requirements.asciidoc @@ -41,58 +41,3 @@ to a follower the following process will fail due to incomplete history on the l The default value is `12h`. For more information about index settings, see {ref}/index-modules.html[Index modules]. - - -[[ccr-overview-beats]] -==== Setting soft deletes on indices created by APM Server or Beats - -If you want to replicate indices created by APM Server or Beats, and are -allowing APM Server or Beats to manage index templates, you need to configure -soft deletes on the underlying index templates. To configure soft deletes on the -underlying index templates, incorporate the following changes to the relevant -APM Server or Beats configuration file. - -["source","yaml"] ----------------------------------------------------------------------- -setup.template.overwrite: true -setup.template.settings: - index.soft_deletes.retention.operations: 1024 ----------------------------------------------------------------------- - -For additional information on controlling the index templates managed by APM -Server or Beats, see the relevant documentation on loading the Elasticsearch -index template. - - -[[ccr-overview-logstash]] -==== Setting soft deletes on indices created by Logstash - -If you want to replicate indices created by Logstash, and are using Logstash to -manage index templates, you need to configure soft deletes on a custom Logstash -index template. 
To configure soft deletes on the underlying index template, -incorporate the following change to a custom Logstash template. - -["source","js"] ----------------------------------------------------------------------- -{ - "settings" : { - "index.soft_deletes.retention.operations" : 1024 - } -} ----------------------------------------------------------------------- -// NOTCONSOLE - -Additionally, you will need to configure the Elasticsearch output plugin to use -this custom template. - -["source","ruby"] ----------------------------------------------------------------------- -output { - elasticsearch { - template => "/path/to/custom/logstash/template.json" - } -} ----------------------------------------------------------------------- - -For additional information on controlling the index templates managed by -Logstash, see the relevant documentation on the Elasticsearch output plugin. diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index fc407f069a0e2..6abbfe6b75e37 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -103,14 +103,21 @@ GET /_cluster/allocation/explain ===== Examples of unassigned primary shard explanations +////// +[source,console] +-------------------------------------------------- +DELETE myindex +-------------------------------------------------- +////// + [source,console] -------------------------------------------------- -PUT /idx?master_timeout=1s&timeout=1s +PUT /my_index?master_timeout=1s&timeout=1s {"settings": {"index.routing.allocation.include._name": "non_existent_node"} } GET /_cluster/allocation/explain { - "index": "idx", + "index": "my_index", "shard": 0, "primary": true } @@ -122,7 +129,7 @@ The API returns the following response for an unassigned primary shard: [source,console-result] -------------------------------------------------- { - "index" : "idx", + "index" : "my_index", "shard" : 0, "primary" : true, "current_state" : "unassigned", <1> @@ -171,7 +178,7 @@ allocated to a node in the cluster: [source,js] -------------------------------------------------- { - "index" : "idx", + "index" : "my_index", "shard" : 0, "primary" : true, "current_state" : "unassigned", @@ -196,7 +203,7 @@ allocation: [source,js] -------------------------------------------------- { - "index" : "idx", + "index" : "my_index", "shard" : 0, "primary" : false, "current_state" : "unassigned", @@ -232,7 +239,7 @@ allocation: { "decider" : "same_shard", "decision" : "NO", - "explanation" : "the shard cannot be allocated to the same node on which a copy of the shard already exists [[idx][0], node[3sULLVJrRneSg0EfBB-2Ew], [P], s[STARTED], a[id=eV9P8BN1QPqRc3B4PLx6cg]]" + "explanation" : "the shard cannot be allocated to the same node on which a copy of the shard already exists [[my_index][0], node[3sULLVJrRneSg0EfBB-2Ew], [P], s[STARTED], a[id=eV9P8BN1QPqRc3B4PLx6cg]]" } ] } @@ -253,7 +260,7 @@ its current node and is required to move: [source,js] -------------------------------------------------- { - "index" : "idx", + "index" : "my_index", "shard" : 0, "primary" : true, "current_state" : "started", @@ -302,7 +309,7 @@ because moving the shard to another node does not form a better cluster balance: [source,js] -------------------------------------------------- { - "index" : "idx", + "index" : "my_index", "shard" : 0, "primary" : true, "current_state" : "started", diff --git a/docs/reference/cluster/nodes-stats.asciidoc 
b/docs/reference/cluster/nodes-stats.asciidoc index 9306c23e43f43..ed04e1b085594 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -960,6 +960,78 @@ Current number of open HTTP connections for the node. (integer) Total number of HTTP connections opened for the node. +[[cluster-nodes-stats-api-response-body-breakers]] +===== `breakers` section + +`breakers..limit_size_in_bytes`:: +(integer) +Memory limit, in bytes, for the circuit breaker. + +`breakers..limit_size`:: +(<>) +Memory limit for the circuit breaker. + +`breakers..estimated_size_in_bytes`:: +(integer) +Estimated memory used, in bytes, for the operation. + +`breakers..estimated_size`:: +(<>) +Estimated memory used for the operation. + +`breakers..overhead`:: +(float) +A constant that all estimates for the circuit breaker are multiplied with to +calculate a final estimate. + +`breakers..tripped`:: +(integer) +Total number of times the circuit breaker has been triggered and prevented an +out of memory error. + +[[cluster-nodes-stats-api-response-body-script]] +===== `script` section + +`script.compilations`:: +(integer) +Total number of inline script compilations performed by the node. + +`script.cache_evictions`:: +(integer) +Total number of times the script cache has evicted old data. + +`script.compilation_limit_triggered`:: +(integer) +Total number of times the <> circuit breaker has limited inline script compilations. + +[[cluster-nodes-stats-api-response-body-discovery]] +===== `discovery` section + +`discovery.cluster_state_queue.total`:: +(integer) +Total number of cluster states in queue. + +`discovery.cluster_state_queue.pending`:: +(integer) +Number of pending cluster states in queue. + +`discovery.cluster_state_queue.committed`:: +(integer) +Number of committed cluster states in queue. + +`discovery.published_cluster_states.full_states`:: +(integer) +Number of published cluster states. + +`discovery.published_cluster_states.incompatible_diffs`:: +(integer) +Number of incompatible differences between published cluster states. + +`discovery.published_cluster_states.compatible_diffs`:: +(integer) +Number of compatible differences between published cluster states. + [[cluster-nodes-stats-api-response-body-ingest]] ===== `ingest` section diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 81e9000c9bb13..7b49a9de873ef 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -10,6 +10,7 @@ tasks from the command line: * <> * <> * <> +* <> * <> * <> * <> @@ -22,6 +23,7 @@ tasks from the command line: include::certgen.asciidoc[] include::certutil.asciidoc[] include::croneval.asciidoc[] +include::keystore.asciidoc[] include::node-tool.asciidoc[] include::saml-metadata.asciidoc[] include::setup-passwords.asciidoc[] diff --git a/docs/reference/commands/keystore.asciidoc b/docs/reference/commands/keystore.asciidoc new file mode 100644 index 0000000000000..7b2df6ee24c77 --- /dev/null +++ b/docs/reference/commands/keystore.asciidoc @@ -0,0 +1,149 @@ +[[elasticsearch-keystore]] +== elasticsearch-keystore + +The `elasticsearch-keystore` command manages <> +in the {es} keystore. 
+ +[discrete] +[[elasticsearch-keystore-synopsis]] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-keystore +([add ] [--stdin] | +[add-file ] | [create] | +[list] | [remove ] | [upgrade]) +[-h, --help] ([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[discrete] +[[elasticsearch-keystore-description]] +=== Description + +IMPORTANT: This command should be run as the user that will run {es}. + +Currently, all secure settings are node-specific settings that must have the +same value on every node. Therefore you must run this command on every node. + +Modifications to the keystore do not take effect until you restart {es}. + +Only some settings are designed to be read from the keystore. However, there +is no validation to block unsupported settings from the keystore and they can +cause {es} to fail to start. To see whether a setting is supported in the +keystore, see the setting reference. + +[discrete] +[[elasticsearch-keystore-parameters]] +=== Parameters + +`add `:: Adds settings to the keystore. By default, you are prompted +for the value of the setting. + +`add-file `:: Adds a file to the keystore. + +`create`:: Creates the keystore. + +`-h, --help`:: Returns all of the command parameters. + +`list`:: Lists the settings in the keystore. + +`remove `:: Removes a setting from the keystore. + +`-s, --silent`:: Shows minimal output. + +`--stdin`:: When used with the `add` parameter, you can pass the setting value +through standard input (stdin). See <>. + +`upgrade`:: Upgrades the internal format of the keystore. + +`-v, --verbose`:: Shows verbose output. + +[discrete] +[[elasticsearch-keystore-examples]] +=== Examples + +[discrete] +[[creating-keystore]] +==== Create the keystore + +To create the `elasticsearch.keystore`, use the `create` command: + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore create +---------------------------------------------------------------- + +A `elasticsearch.keystore` file is created alongside the `elasticsearch.yml` +file. + +[discrete] +[[list-settings]] +==== List settings in the keystore + +To list the settings in the keystore, use the `list` command. + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore list +---------------------------------------------------------------- + +[discrete] +[[add-string-to-keystore]] +==== Add settings to the keystore + +Sensitive string settings, like authentication credentials for Cloud plugins, +can be added with the `add` command: + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore add the.setting.name.to.set +---------------------------------------------------------------- + +You are prompted to enter the value of the setting. To pass the value +through standard input (stdin), use the `--stdin` flag: + +[source,sh] +---------------------------------------------------------------- +cat /file/containing/setting/value | bin/elasticsearch-keystore add --stdin the.setting.name.to.set +---------------------------------------------------------------- + +[discrete] +[[add-file-to-keystore]] +==== Add files to the keystore + +You can add sensitive files, like authentication key files for Cloud plugins, +using the `add-file` command. Be sure to include your file path as an argument +after the setting name. 
+ +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore add-file the.setting.name.to.set /path/example-file.json +---------------------------------------------------------------- + +[discrete] +[[remove-settings]] +==== Remove settings from the keystore + +To remove a setting from the keystore, use the `remove` command: + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore remove the.setting.name.to.remove +---------------------------------------------------------------- + +[discrete] +[[keystore-upgrade]] +==== Upgrade the keystore + +Occasionally, the internal format of the keystore changes. When {es} is +installed from a package manager, an upgrade of the on-disk keystore to the new +format is done during package upgrade. In other cases, {es} performs the upgrade +during node startup. This requires that {es} has write permissions to the +directory that contains the keystore. Alternatively, you can manually perform +such an upgrade by using the `upgrade` command: + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore upgrade +---------------------------------------------------------------- diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index 4dd2b0dfe0b6a..f1dc24bc761a5 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -3,9 +3,9 @@ The `elasticsearch-node` command enables you to perform certain unsafe operations on a node that are only possible while it is shut down. This command -allows you to adjust the <> of a node and may be able to -recover some data after a disaster or start a node even if it is incompatible -with the data on disk. +allows you to adjust the <> of a node, unsafely edit cluster +settings and may be able to recover some data after a disaster or start a node +even if it is incompatible with the data on disk. [float] === Synopsis @@ -20,13 +20,21 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-versio [float] === Description -This tool has four modes: +This tool has a number of modes: * `elasticsearch-node repurpose` can be used to delete unwanted data from a node if it used to be a <> or a <> but has been repurposed not to have one or other of these roles. +* `elasticsearch-node remove-settings` can be used to remove persistent settings + from the cluster state in case where it contains incompatible settings that + prevent the cluster from forming. + +* `elasticsearch-node remove-customs` can be used to remove custom metadata + from the cluster state in case where it contains broken metadata that + prevents the cluster state from being loaded. + * `elasticsearch-node unsafe-bootstrap` can be used to perform _unsafe cluster bootstrapping_. It forces one of the nodes to form a brand-new cluster on its own, using its local copy of the cluster metadata. @@ -76,6 +84,44 @@ The tool provides a summary of the data to be deleted and asks for confirmation before making any changes. You can get detailed information about the affected indices and shards by passing the verbose (`-v`) option. +[float] +==== Removing persistent cluster settings + +There may be situations where a node contains persistent cluster +settings that prevent the cluster from forming. Since the cluster cannot form, +it is not possible to remove these settings using the +<> API. 
+ +The `elasticsearch-node remove-settings` tool allows you to forcefully remove +those persistent settings from the on-disk cluster state. The tool takes a +list of settings as parameters that should be removed, and also supports +wildcard patterns. + +The intended use is: + +* Stop the node +* Run `elasticsearch-node remove-settings name-of-setting-to-remove` on the node +* Repeat for all other master-eligible nodes +* Start the nodes + +[float] +==== Removing custom metadata from the cluster state + +There may be situations where a node contains custom metadata, typically +provided by plugins, that prevent the node from starting up and loading +the cluster from disk. + +The `elasticsearch-node remove-customs` tool allows you to forcefully remove +the problematic custom metadata. The tool takes a list of custom metadata names +as parameters that should be removed, and also supports wildcard patterns. + +The intended use is: + +* Stop the node +* Run `elasticsearch-node remove-customs name-of-custom-to-remove` on the node +* Repeat for all other master-eligible nodes +* Start the nodes + [float] ==== Recovering data after a disaster @@ -143,9 +189,9 @@ If there is at least one remaining master-eligible node, but it is not possible to restart a majority of them, then the `elasticsearch-node unsafe-bootstrap` command will unsafely override the cluster's <> as if performing another -<>. +<>. The target node can then form a new cluster on its own by using -the cluster metadata held locally on the target node. +the cluster metadata held locally on the target node. [WARNING] These steps can lead to arbitrary data loss since the target node may not hold the latest cluster @@ -290,6 +336,9 @@ it can join a different cluster. `override-version`:: Overwrites the version number stored in the data path so that a node can start despite being incompatible with the on-disk data. +`remove-settings`:: Forcefully removes the provided persistent cluster settings +from the on-disk cluster state. + `-E `:: Configures a setting. `-h, --help`:: Returns all of the command parameters. @@ -346,6 +395,67 @@ Confirm [y/N] y Node successfully repurposed to no-master and no-data. ---- +[float] +==== Removing persistent cluster settings + +If your nodes contain persistent cluster settings that prevent the cluster +from forming, i.e., can't be removed using the <> API, +you can run the following commands to remove one or more cluster settings. + +[source,txt] +---- +node$ ./bin/elasticsearch-node remove-settings xpack.monitoring.exporters.my_exporter.host + + WARNING: Elasticsearch MUST be stopped before running this tool. + +The following settings will be removed: +xpack.monitoring.exporters.my_exporter.host: "10.1.2.3" + +You should only run this tool if you have incompatible settings in the +cluster state that prevent the cluster from forming. +This tool can cause data loss and its use should be your last resort. + +Do you want to proceed? + +Confirm [y/N] y + +Settings were successfully removed from the cluster state +---- + +You can also use wildcards to remove multiple settings, for example using + +[source,txt] +---- +node$ ./bin/elasticsearch-node remove-settings xpack.monitoring.* +---- + +[float] +==== Removing custom metadata from the cluster state + +If the on-disk cluster state contains custom metadata that prevents the node +from starting up and loading the cluster state, you can run the following +commands to remove this custom metadata. 
+ +[source,txt] +---- +node$ ./bin/elasticsearch-node remove-customs snapshot_lifecycle + + WARNING: Elasticsearch MUST be stopped before running this tool. + +The following customs will be removed: +snapshot_lifecycle + +You should only run this tool if you have broken custom metadata in the +cluster state that prevents the cluster state from being loaded. +This tool can cause data loss and its use should be your last resort. + +Do you want to proceed? + +Confirm [y/N] y + +Customs were successfully removed from the cluster state +---- + [float] ==== Unsafe cluster bootstrapping diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 65401a5c866d7..4cfa87489165a 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -55,7 +55,7 @@ PUT _ilm/policy/my_policy } -------------------------------------------------- -The Above example configures a policy that moves the index into the warm +The above example configures a policy that moves the index into the warm phase after one day. Until then, the index is in a waiting state. After moving into the warm phase, it will wait until 30 days have elapsed before moving to the delete phase and deleting the index. @@ -76,10 +76,14 @@ check occurs. === Phase Execution The current phase definition, of an index's policy being executed, is stored -in the index's metadata. The phase and its actions are compiled into a series -of discrete steps that are executed sequentially. Since some {ilm-init} actions -are more complex and involve multiple operations against an index, each of these -operations are done in isolation in a unit called a "step". The +in the index's metadata. This phase definition is cached to prevent changes to +the policy from putting the index in a state where it cannot proceed from its +current step. When the policy is updated we check to see if this phase +definition can be safely updated, and if so, update the cached definition in +indices using the updated policy. The phase and its actions are compiled into a +series of discrete steps that are executed sequentially. Since some {ilm-init} +actions are more complex and involve multiple operations against an index, each +of these operations are done in isolation in a unit called a "step". The <> exposes this information to us to see which step our index is either to execute next, or is currently executing. @@ -666,7 +670,9 @@ PUT _ilm/policy/my_policy [IMPORTANT] This action may be used explicitly, as shown below, but this action is also run before <> and <> as described in the documentation for those actions. +Shrink action>> as described in the documentation for those actions. This is +expected and safe for non-CCR indices to run, as the steps are skipped when CCR +is not in use. This action turns a {ref}/ccr-apis.html[ccr] follower index into a regular index. This can be desired when moving follower diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc index fcbbbcd811545..dc9ecb817c407 100644 --- a/docs/reference/ilm/start-stop-ilm.asciidoc +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -79,7 +79,7 @@ The operating modes of ILM: |=== [float] -=== Stopping ILM= +=== Stopping ILM The ILM service can be paused such that no further steps will be executed using the <>. 
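As a minimal sketch of what pausing looks like in practice (assuming the stop API referenced above, which takes no request body), the call is simply:

[source,console]
--------------------------------------------------
POST _ilm/stop
--------------------------------------------------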
diff --git a/docs/reference/index-modules/allocation/delayed.asciidoc b/docs/reference/index-modules/allocation/delayed.asciidoc index f49ed7e05dbcd..5dee9444668ca 100644 --- a/docs/reference/index-modules/allocation/delayed.asciidoc +++ b/docs/reference/index-modules/allocation/delayed.asciidoc @@ -28,7 +28,7 @@ this scenario: If the master had just waited for a few minutes, then the missing shards could have been re-allocated to Node 5 with the minimum of network traffic. This process would be even quicker for idle shards (shards not receiving indexing -requests) which have been automatically <>. +requests) which have been automatically <>. The allocation of replica shards which become unassigned because a node has left can be delayed with the `index.unassigned.node_left.delayed_timeout` diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index e1bec04eabc03..224346f3f5dfa 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -68,7 +68,6 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> -* <> * <> @@ -136,10 +135,8 @@ include::indices/shrink-index.asciidoc[] include::indices/split-index.asciidoc[] -include::indices/synced-flush.asciidoc[] - include::indices/apis/unfreeze.asciidoc[] include::indices/aliases.asciidoc[] -include::indices/update-settings.asciidoc[] \ No newline at end of file +include::indices/update-settings.asciidoc[] diff --git a/docs/reference/indices/synced-flush.asciidoc b/docs/reference/indices/synced-flush.asciidoc index cb2c40793091a..4d0ab4ff98c2a 100644 --- a/docs/reference/indices/synced-flush.asciidoc +++ b/docs/reference/indices/synced-flush.asciidoc @@ -4,274 +4,4 @@ Synced flush ++++ -Performs a synced flush on one or more indices. - -[source,console] --------------------------------------------------- -POST /twitter/_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-request]] -==== {api-request-title} - -`POST //flush/synced` - -`GET //flush/synced` - -`POST /flush/synced` - -`GET /flush/synced` - - -[[synced-flush-api-desc]] -==== {api-description-title} - -[[synced-flush-using-api]] -===== Use the synced flush API - -Use the synced flush API to manually initiate a synced flush. -This can be useful for a planned cluster restart where -you can stop indexing but don't want to wait for 5 minutes until all indices -are marked as inactive and automatically sync-flushed. - -You can request a synced flush even if there is ongoing indexing activity, and -{es} will perform the synced flush on a "best-effort" basis: shards that do not -have any ongoing indexing activity will be successfully sync-flushed, and other -shards will fail to sync-flush. The successfully sync-flushed shards will have -faster recovery times as long as the `sync_id` marker is not removed by a -subsequent flush. - - -[[synced-flush-overview]] -===== Synced flush overview - -{es} keeps track of which shards have received indexing activity recently, and -considers shards that have not received any indexing operations for 5 minutes to -be inactive. - -When a shard becomes inactive {es} performs a special kind of flush -known as a *synced flush*. A synced flush performs a normal -<> on each replica of the shard, and then adds a marker known -as the `sync_id` to each replica to indicate that these copies have identical -Lucene indices. 
Comparing the `sync_id` markers of the two copies is a very -efficient way to check whether they have identical contents. - -When allocating shard replicas, {es} must ensure that each replica contains the -same data as the primary. If the shard copies have been synced-flushed and the -replica shares a `sync_id` with the primary then {es} knows that the two copies -have identical contents. This means there is no need to copy any segment files -from the primary to the replica, which saves a good deal of time during -recoveries and restarts. - -This is particularly useful for clusters having lots of indices which are very -rarely updated, such as with time-based indices. Without the synced flush -marker, recovery of this kind of cluster would be much slower. - - -[[synced-flush-sync-id-markers]] -===== Check for `sync_id` markers - -To check whether a shard has a `sync_id` marker or not, look for the `commit` -section of the shard stats returned by the <> API: - -[source,console] --------------------------------------------------- -GET /twitter/_stats?filter_path=**.commit&level=shards <1> --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -<1> `filter_path` is used to reduce the verbosity of the response, but is entirely optional - -The API returns the following response: - -[source,console-result] --------------------------------------------------- -{ - "indices": { - "twitter": { - "shards": { - "0": [ - { - "commit" : { - "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", - "generation" : 3, - "user_data" : { - "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", - "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", - "local_checkpoint" : "-1", - "translog_generation" : "2", - "max_seq_no" : "-1", - "sync_id" : "AVvFY-071siAOuFGEO9P", <1> - "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no" : "0" - }, - "num_docs" : 0 - } - } - ] - } - } - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] -<1> the `sync id` marker - -NOTE: The `sync_id` marker is removed as soon as the shard is flushed again, and -{es} may trigger an automatic flush of a shard at any time if there are -unflushed operations in the shard's translog. In practice this means that one -should consider any indexing operation on an index as having removed its -`sync_id` markers. - - -[[synced-flush-api-path-params]] -==== {api-path-parms-title} - -include::{docdir}/rest-api/common-parms.asciidoc[tag=index] -+ -To sync-flush all indices, -omit this parameter -or use a value of `_all` or `*`. - - -[[synced-flush-api-query-params]] -==== {api-query-parms-title} - -include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] -+ -Defaults to `open`. - -include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] - - -[[synced-flush-api-response-codes]] -==== {api-response-codes-title} - -`200`:: -All shards successfully sync-flushed. - -`409`:: -A replica shard failed to sync-flush. 
- - -[[synced-flush-api-example]] -==== {api-examples-title} - - -[[synced-flush-api-specific-ex]] -===== Sync-flush a specific index - -[source,console] ----- -POST /kimchy/_flush/synced ----- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-multi-ex]] -===== Synch-flush several indices - -[source,console] --------------------------------------------------- -POST /kimchy,elasticsearch/_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-all-ex]] -===== Sync-flush all indices - -[source,console] --------------------------------------------------- -POST /_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -The response contains details about how many shards were successfully -sync-flushed and information about any failure. - -The following response indicates two shards -and one replica shard -successfully sync-flushed: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 2, - "successful": 2, - "failed": 0 - }, - "twitter": { - "total": 2, - "successful": 2, - "failed": 0 - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -The following response indicates one shard group failed -due to pending operations: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 2, - "failed": 2 - }, - "twitter": { - "total": 4, - "successful": 2, - "failed": 2, - "failures": [ - { - "shard": 1, - "reason": "[2] ongoing operations on primary" - } - ] - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -Sometimes the failures are specific to a shard replica. The copies that failed -will not be eligible for fast recovery but those that succeeded still will be. -This case is reported as follows: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 1, - "failed": 1 - }, - "twitter": { - "total": 4, - "successful": 3, - "failed": 1, - "failures": [ - { - "shard": 1, - "reason": "unexpected error", - "routing": { - "state": "STARTED", - "primary": false, - "node": "SZNr2J_ORxKTLUCydGX4zA", - "relocating_node": null, - "shard": 1, - "index": "twitter" - } - } - ] - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] +Synced flush was removed. Use normal <> instead. 
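For example, a plain flush of a single index is a one-line request; `my-index` below is only a placeholder index name:

[source,console]
--------------------------------------------------
POST /my-index/_flush
--------------------------------------------------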
diff --git a/docs/reference/ingest/apis/simulate-pipeline.asciidoc b/docs/reference/ingest/apis/simulate-pipeline.asciidoc index 1b2317b83ecd6..cb954d6d2a081 100644 --- a/docs/reference/ingest/apis/simulate-pipeline.asciidoc +++ b/docs/reference/ingest/apis/simulate-pipeline.asciidoc @@ -350,7 +350,8 @@ The API returns the following response: "foo": "bar" }, "_ingest": { - "timestamp": "2017-05-04T22:46:09.674Z" + "timestamp": "2017-05-04T22:46:09.674Z", + "pipeline": "_simulate_pipeline" } } }, @@ -364,7 +365,8 @@ The API returns the following response: "foo": "bar" }, "_ingest": { - "timestamp": "2017-05-04T22:46:09.675Z" + "timestamp": "2017-05-04T22:46:09.675Z", + "pipeline": "_simulate_pipeline" } } } @@ -381,7 +383,8 @@ The API returns the following response: "foo": "rab" }, "_ingest": { - "timestamp": "2017-05-04T22:46:09.676Z" + "timestamp": "2017-05-04T22:46:09.676Z", + "pipeline": "_simulate_pipeline" } } }, @@ -395,7 +398,8 @@ The API returns the following response: "foo": "rab" }, "_ingest": { - "timestamp": "2017-05-04T22:46:09.677Z" + "timestamp": "2017-05-04T22:46:09.677Z", + "pipeline": "_simulate_pipeline" } } } diff --git a/docs/reference/ingest/processors/pipeline.asciidoc b/docs/reference/ingest/processors/pipeline.asciidoc index 7f1ea2885e69a..8a8b0310142d8 100644 --- a/docs/reference/ingest/processors/pipeline.asciidoc +++ b/docs/reference/ingest/processors/pipeline.asciidoc @@ -21,6 +21,8 @@ include::common-options.asciidoc[] -------------------------------------------------- // NOTCONSOLE +The name of the current pipeline can be accessed from the `_ingest.pipeline` ingest metadata key. + An example of using this processor for nesting pipelines would be: Define an inner pipeline: diff --git a/docs/reference/licensing/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc index 807a40729f9fd..34f12ef514406 100644 --- a/docs/reference/licensing/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -60,6 +60,7 @@ GET /_license "expiry_date" : "2018-11-19T22:05:12.332Z", "expiry_date_in_millis" : 1542665112332, "max_nodes" : 1000, + "max_resource_units" : null, "issued_to" : "test", "issuer" : "elasticsearch", "start_date_in_millis" : -1 diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 9ea5be72bdb28..ab0869bd558fb 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -95,7 +95,7 @@ Malformed fields are silently ignored at indexing time when `ignore_malformed` is turned on. Whenever possible it is recommended to keep the number of documents that have a malformed field contained, or queries on this field will become meaningless. Elasticsearch makes it easy to check how many documents -have malformed fields by using `exist` or `term` queries on the special +have malformed fields by using `exists`,`term` or `terms` queries on the special <> field. [[json-object-limits]] @@ -111,4 +111,4 @@ of the wrong datatype. A JSON object is any data surrounded by curly brackets `"{}"` and includes data mapped to the nested, object, and range datatypes. If you submit a JSON object to an unsupported field, {es} will return an error -and reject the entire document regardless of the `ignore_malformed` setting. \ No newline at end of file +and reject the entire document regardless of the `ignore_malformed` setting. 
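To make the suggestion above concrete, the following sketch counts documents that had at least one field ignored at index time by running an `exists` query against the `_ignored` metadata field; the index name `my-index` is a placeholder:

[source,console]
--------------------------------------------------
GET /my-index/_count
{
  "query": {
    "exists": {
      "field": "_ignored"
    }
  }
}
--------------------------------------------------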
diff --git a/docs/reference/migration/migrate_8_0/analysis.asciidoc b/docs/reference/migration/migrate_8_0/analysis.asciidoc index dd4b4ae73e920..6a7205436d0f6 100644 --- a/docs/reference/migration/migrate_8_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_8_0/analysis.asciidoc @@ -16,3 +16,12 @@ The `nGram` and `edgeNGram` token filter names that have been deprecated since version 6.4 have been removed. Both token filters can only be used by their alternative names `ngram` and `edge_ngram` since version 7.0. + +[float] +[[nGram-edgeNGram-tokenizer-dreprecation]] +==== Disallow use of the `nGram` and `edgeNGram` tokenizer names + +The `nGram` and `edgeNGram` tokenizer names have been deprecated in 7.6 and are no longer +supported on new indices. Mappings for indices created after 7.6 will continue to work but +emit a deprecation warning. The tokenizer name should be changed to the fully equivalent +`ngram` or `edge_ngram` names for new indices and in index templates. diff --git a/docs/reference/migration/migrate_8_0/indices.asciidoc b/docs/reference/migration/migrate_8_0/indices.asciidoc index 5b1d5a10df090..ef5cd25f81808 100644 --- a/docs/reference/migration/migrate_8_0/indices.asciidoc +++ b/docs/reference/migration/migrate_8_0/indices.asciidoc @@ -27,3 +27,10 @@ and the setting is removed. In 6.0, we deprecated the `template` field in put index template requests in favor of using `index_patterns`. Support for the `template` field is now removed in 8.0. + + +[float] +==== Remove synced flush + +Synced flush was deprecated in 7.6 and is removed in 8.0. Use a regular flush +instead as it has the same effect as a synced flush in 7.6 and later. diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 796d4995404a9..a823c9c0ec7b5 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -64,6 +64,11 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config] `frequency`:: (Optional, <>) include::{docdir}/ml/ml-shared.asciidoc[tag=frequency] ++ +-- +To learn more about the relationship between time-related settings, see +<>. +-- `indices`:: (Required, array) @@ -84,6 +89,11 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=query] `query_delay`:: (Optional, <>) include::{docdir}/ml/ml-shared.asciidoc[tag=query-delay] ++ +-- +To learn more about the relationship between time-related settings, see +<>. +-- `script_fields`:: (Optional, object) @@ -93,6 +103,20 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=script-fields] (Optional, unsigned integer) include::{docdir}/ml/ml-shared.asciidoc[tag=scroll-size] + +[[ml-put-datafeed-time-related-settings]] +===== Interaction between time-related settings + +Time-related settings have the following relationships, illustrated by the sketch after this list: + +* Queries run at `query_delay` after the end of + each `frequency`. + +* When `frequency` is shorter than `bucket_span` of the associated job, interim + results for the last (partial) bucket are written, and then eventually overwritten by the + full bucket results.
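The sketch below shows where `frequency` and `query_delay` sit in a datafeed request. The datafeed ID, job ID, and index name are placeholders, and the values are chosen only to illustrate a `frequency` that is shorter than a typical `bucket_span`:

[source,console]
--------------------------------------------------
PUT _ml/datafeeds/datafeed-example
{
  "job_id": "example-job",
  "indices": ["server-metrics*"],
  "query_delay": "90s",
  "frequency": "150s"
}
--------------------------------------------------

With this configuration, each query runs every 150 seconds and looks 90 seconds behind real time, so data that arrives late can still fall inside the queried window.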
+ + [[ml-put-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index a4661a19c435d..f3de1621644ed 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -49,6 +49,11 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=analysis-config] `analysis_config`.`bucket_span`::: (<>) include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-span] ++ +-- +To learn more about the relationship between time related settings, see +<>. +-- `analysis_config`.`categorization_field_name`::: (string) diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index c331d9bda7aab..ad4f0467750fd 100644 --- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -43,15 +43,16 @@ result field to be present. ==== {api-request-body-title} `evaluation`:: -(Required, object) Defines the type of evaluation you want to perform. The -value of this object can be different depending on the type of evaluation you -want to perform. See <>. +(Required, object) Defines the type of evaluation you want to perform. +See <>. + -- Available evaluation types: + * `binary_soft_classification` * `regression` * `classification` + -- `index`:: @@ -59,14 +60,14 @@ Available evaluation types: performed. `query`:: -(Optional, object) A query clause that retrieves a subset of data from the +(Optional, object) A query clause that retrieves a subset of data from the source index. See <>. [[ml-evaluate-dfanalytics-resources]] ==== {dfanalytics-cap} evaluation resources [[binary-sc-resources]] -===== Binary soft classification configuration objects +===== Binary soft classification evaluation objects Binary soft classification evaluates the results of an analysis which outputs the probability that each document belongs to a certain class. For example, in @@ -87,24 +88,24 @@ document is an outlier. (Optional, object) Specifies the metrics that are used for the evaluation. Available metrics: - `auc_roc`:: + `auc_roc`::: (Optional, object) The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. Default value is {"includes_curve": false}. - `precision`:: + `confusion_matrix`::: + (Optional, object) Set the different thresholds of the {olscore} at where + the metrics (`tp` - true positive, `fp` - false positive, `tn` - true + negative, `fn` - false negative) are calculated. Default value is + {"at": [0.25, 0.50, 0.75]}. + + `precision`::: (Optional, object) Set the different thresholds of the {olscore} at where the metric is calculated. Default value is {"at": [0.25, 0.50, 0.75]}. - `recall`:: + `recall`::: (Optional, object) Set the different thresholds of the {olscore} at where the metric is calculated. Default value is {"at": [0.25, 0.50, 0.75]}. - - `confusion_matrix`:: - (Optional, object) Set the different thresholds of the {olscore} at where - the metrics (`tp` - true positive, `fp` - false positive, `tn` - true - negative, `fn` - false negative) are calculated. Default value is - {"at": [0.25, 0.50, 0.75]}. [[regression-evaluation-resources]] @@ -122,9 +123,18 @@ which outputs a prediction of values. in other words the results of the {regression} analysis. 
`metrics`:: - (Required, object) Specifies the metrics that are used for the evaluation. - Available metrics are `r_squared` and `mean_squared_error`. - + (Optional, object) Specifies the metrics that are used for the evaluation. + Available metrics: + + `mean_squared_error`::: + (Optional, object) Average squared difference between the predicted values and the actual (`ground truth`) value. + For more information, read https://en.wikipedia.org/wiki/Mean_squared_error[this wiki article]. + + `r_squared`::: + (Optional, object) Proportion of the variance in the dependent variable that is predictable from the independent variables. + For more information, read https://en.wikipedia.org/wiki/Coefficient_of_determination[this wiki article]. + + [[classification-evaluation-resources]] ==== {classification-cap} evaluation objects @@ -134,20 +144,28 @@ outputs a prediction that identifies to which of the classes each document belongs. `actual_field`:: - (Required, string) The field of the `index` which contains the ground truth. - The data type of this field must be keyword. - -`metrics`:: - (Required, object) Specifies the metrics that are used for the evaluation. - Available metric is `multiclass_confusion_matrix`. + (Required, string) The field of the `index` which contains the `ground truth`. + The data type of this field must be categorical. `predicted_field`:: (Required, string) The field in the `index` that contains the predicted value, - in other words the results of the {classanalysis}. The data type of this field - is string. You need to add `.keyword` to the predicted field name (the name - you put in the {classanalysis} object as `prediction_field_name` or the - default value of the same field if you didn't specified explicitly). For - example, `predicted_field` : `ml.animal_class_prediction.keyword`. + in other words the results of the {classanalysis}. + +`metrics`:: + (Optional, object) Specifies the metrics that are used for the evaluation. + Available metrics: + + `accuracy`::: + (Optional, object) Accuracy of predictions (per-class and overall). + + `multiclass_confusion_matrix`::: + (Optional, object) Multiclass confusion matrix. + + `precision`::: + (Optional, object) Precision of predictions (per-class and average). + + `recall`::: + (Optional, object) Recall of predictions (per-class and average). //// @@ -360,7 +378,7 @@ POST _ml/data_frame/_evaluate "evaluation": { "classification": { <1> "actual_field": "animal_class", <2> - "predicted_field": "ml.animal_class_prediction.keyword", <3> + "predicted_field": "ml.animal_class_prediction", <3> "metrics": { "multiclass_confusion_matrix" : {} <4> } @@ -374,8 +392,7 @@ POST _ml/data_frame/_evaluate <2> The field that contains the ground truth value for the actual animal classification. This is required in order to evaluate results. <3> The field that contains the predicted value for animal classification by -the {classanalysis}. Since the field storing predicted class is dynamically -mapped as text and keyword, you need to add the `.keyword` suffix to the name. +the {classanalysis}. <4> Specifies the metric for the evaluation. 
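Since `metrics` is an object, several of the metrics listed above can be requested in one call. The request below is a sketch only: the field names are taken from the example above, while the index name is a placeholder:

[source,console]
--------------------------------------------------
POST _ml/data_frame/_evaluate
{
  "index": "animal-classification-dest",
  "evaluation": {
    "classification": {
      "actual_field": "animal_class",
      "predicted_field": "ml.animal_class_prediction",
      "metrics": {
        "accuracy": {},
        "recall": {}
      }
    }
  }
}
--------------------------------------------------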
diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 24149372e0e99..9c1b41c9b57ad 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -148,6 +148,10 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=prediction-field-name] (Optional, long) include::{docdir}/ml/ml-shared.asciidoc[tag=randomize-seed] +`analysis`.`classification`.`num_top_feature_importance_values`:::: +(Optional, integer) +include::{docdir}/ml/ml-shared.asciidoc[tag=num-top-feature-importance-values] + `analysis`.`classification`.`training_percent`:::: (Optional, integer) include::{docdir}/ml/ml-shared.asciidoc[tag=training-percent] @@ -227,6 +231,10 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=lambda] (Optional, string) include::{docdir}/ml/ml-shared.asciidoc[tag=prediction-field-name] +`analysis`.`regression`.`num_top_feature_importance_values`:::: +(Optional, integer) +include::{docdir}/ml/ml-shared.asciidoc[tag=num-top-feature-importance-values] + `analysis`.`regression`.`training_percent`:::: (Optional, integer) include::{docdir}/ml/ml-shared.asciidoc[tag=training-percent] @@ -399,7 +407,7 @@ The API returns the following result: } ---- // TESTRESPONSE[s/1562265491319/$body.$_path/] -// TESTRESPONSE[s/"version": "8.0.0"/"version": $body.version/] +// TESTRESPONSE[s/"version" : "8.0.0"/"version" : $body.version/] [[ml-put-dfanalytics-example-r]] diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 5454939af1e24..f25cfb94e8bed 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -639,6 +639,14 @@ end::include-model-definition[] tag::indices[] An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. + +tag::num-top-feature-importance-values[] +Advanced configuration option. If set, feature importance for the top +most important features will be computed. Importance is calculated +using the SHAP (SHapley Additive exPlanations) method as described in +https://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf[Lundberg, S. M., & Lee, S.-I. A Unified Approach to Interpreting Model Predictions. In NeurIPS 2017.]. +end::num-top-feature-importance-values[] + + -- NOTE: If any indices are in remote clusters then `cluster.remote.connect` must diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index c69f736feb17c..5a5c5c5e0b748 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -20,8 +20,8 @@ There are several thread pools, but the important ones include: `1000`. [[search-throttled]]`search_throttled`:: - For count/search/suggest/get operations on `search_throttled indices`. - Thread pool type is `fixed_auto_queue_size` with a size of `1`, and initial + For count/search/suggest/get operations on `search_throttled indices`. + Thread pool type is `fixed_auto_queue_size` with a size of `1`, and initial queue_size of `100`. `get`:: @@ -30,7 +30,7 @@ There are several thread pools, but the important ones include: queue_size of `1000`. `analyze`:: - For analyze requests. Thread pool type is `fixed` with a size of `1`, queue + For analyze requests. Thread pool type is `fixed` with a size of `1`, queue size of `16`. 
`write`:: @@ -51,8 +51,8 @@ There are several thread pools, but the important ones include: keep-alive of `5m` and a max of `min(10, (# of available processors)/2)`. `listener`:: - Mainly for java client executing of action when listener threaded is set to - `true`. Thread pool type is `scaling` with a default max of + Mainly for java client executing of action when listener threaded is set to + `true`. Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. `fetch_shard_started`:: @@ -66,7 +66,7 @@ There are several thread pools, but the important ones include: size of `2 * # of available processors`. `flush`:: - For <>, <>, and <> `fsync` operations. + For <> and <> `fsync` operations. Thread pool type is `scaling` with a keep-alive of `5m` and a default maximum size of `min(5, (# of available processors)/2)`. @@ -202,13 +202,13 @@ processors: 2 There are a few use-cases for explicitly overriding the `processors` setting: -. If you are running multiple instances of {es} on the same host but want {es} -to size its thread pools as if it only has a fraction of the CPU, you should -override the `processors` setting to the desired fraction, for example, if +. If you are running multiple instances of {es} on the same host but want {es} +to size its thread pools as if it only has a fraction of the CPU, you should +override the `processors` setting to the desired fraction, for example, if you're running two instances of {es} on a 16-core machine, set `processors` to 8. -Note that this is an expert-level use case and there's a lot more involved -than just setting the `processors` setting as there are other considerations -like changing the number of garbage collector threads, pinning processes to +Note that this is an expert-level use case and there's a lot more involved +than just setting the `processors` setting as there are other considerations +like changing the number of garbage collector threads, pinning processes to cores, and so on. . Sometimes the number of processors is wrongly detected and in such cases explicitly setting the `processors` setting will workaround such diff --git a/docs/reference/monitoring/configuring-filebeat.asciidoc b/docs/reference/monitoring/configuring-filebeat.asciidoc index d51c65bbac8d4..6f4b5eee8ec53 100644 --- a/docs/reference/monitoring/configuring-filebeat.asciidoc +++ b/docs/reference/monitoring/configuring-filebeat.asciidoc @@ -117,7 +117,7 @@ If {security-features} are enabled, you must provide a valid user ID and password so that {filebeat} can connect to {kib}: .. Create a user on the monitoring cluster that has the -<> or equivalent +<> or equivalent privileges. .. Add the `username` and `password` settings to the {es} output information in diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index a51283ceeb341..27f37423fa28f 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -84,6 +84,7 @@ be executed in memory or indexed. See <> below for further d Default is `memory`. 
|======================================================================= +[[query-dsl-geo-bounding-box-query-accepted-formats]] [float] ==== Accepted Formats diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 36b9791ebfdf0..0703260414716 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -764,8 +764,9 @@ end::source-transforms[] tag::source-index-transforms[] The _source indices_ for the {transform}. It can be a single index, an index -pattern (for example, `"myindex*"`), or an array of indices (for example, -`["index1", "index2"]`). +pattern (for example, `"myindex*"`), an array of indices (for example, +`["index1", "index2"]`), or an array of index patterns (for example, +`["myindex1-*", "myindex2-*"]`. end::source-index-transforms[] tag::source-query-transforms[] diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 58402758a72d8..6b3151f752cf9 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -1,11 +1,11 @@ [[restart-cluster]] == Full-cluster restart and rolling restart - -There may be {ref}/configuring-tls.html#tls-transport[situations where you want -to perform a full-cluster restart] or a rolling restart. In the case of -<>, you shut down and restart all the -nodes in the cluster while in the case of -<>, you shut down only one node at a + +There may be {ref}/configuring-tls.html#tls-transport[situations where you want +to perform a full-cluster restart] or a rolling restart. In the case of +<>, you shut down and restart all the +nodes in the cluster while in the case of +<>, you shut down only one node at a time, so the service remains uninterrupted. @@ -21,27 +21,29 @@ include::{docdir}/upgrade/disable-shard-alloc.asciidoc[] -- // end::disable_shard_alloc[] // tag::stop_indexing[] -. *Stop indexing and perform a synced flush.* +. *Stop indexing and perform a flush.* + -- -Performing a <> speeds up shard -recovery. +Performing a <> speeds up shard recovery. -include::{docdir}/upgrade/synced-flush.asciidoc[] +[source,console] +-------------------------------------------------- +POST /_flush +-------------------------------------------------- -- // end::stop_indexing[] //tag::stop_ml[] . *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) + -- -{ml-cap} features require a platinum license or higher. For more information about Elastic +{ml-cap} features require a platinum license or higher. For more information about Elastic license levels, see https://www.elastic.co/subscriptions[the subscription page]. -You have two options to handle {ml} jobs and {dfeeds} when you shut down a +You have two options to handle {ml} jobs and {dfeeds} when you shut down a cluster: * Temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and -prevent new jobs from opening by using the +prevent new jobs from opening by using the <>: + [source,console] @@ -50,15 +52,15 @@ POST _ml/set_upgrade_mode?enabled=true -------------------------------------------------- // TEST + -When you disable upgrade mode, the jobs resume using the last model state that -was automatically saved. This option avoids the overhead of managing active jobs -during the shutdown and is faster than explicitly stopping {dfeeds} and closing +When you disable upgrade mode, the jobs resume using the last model state that +was automatically saved. 
This option avoids the overhead of managing active jobs +during the shutdown and is faster than explicitly stopping {dfeeds} and closing jobs. * {ml-docs}/stopping-ml.html[Stop all {dfeeds} and close all jobs]. This option saves the model state at the time of closure. When you reopen the jobs after the -cluster restart, they use the exact same model. However, saving the latest model -state takes longer than using upgrade mode, especially if you have a lot of jobs +cluster restart, they use the exact same model. However, saving the latest model +state takes longer than using upgrade mode, especially if you have a lot of jobs or jobs with large model states. -- // end::stop_ml[] @@ -102,8 +104,8 @@ When a node joins the cluster, it begins to recover any primary shards that are stored locally. The <> API initially reports a `status` of `red`, indicating that not all primary shards have been allocated. -Once a node recovers its local shards, the cluster `status` switches to -`yellow`, indicating that all primary shards have been recovered, but not all +Once a node recovers its local shards, the cluster `status` switches to +`yellow`, indicating that all primary shards have been recovered, but not all replica shards are allocated. This is to be expected because you have not yet re-enabled allocation. Delaying the allocation of replicas until all nodes are `yellow` allows the master to allocate replicas to nodes that @@ -149,7 +151,7 @@ GET _cat/recovery . *Restart machine learning jobs.* (Optional) + -- -If you temporarily halted the tasks associated with your {ml} jobs, use the +If you temporarily halted the tasks associated with your {ml} jobs, use the <> to return them to active states: [source,console] @@ -158,7 +160,7 @@ POST _ml/set_upgrade_mode?enabled=false -------------------------------------------------- // TEST[continued] -If you closed all {ml} jobs before stopping the nodes, open the jobs and start +If you closed all {ml} jobs before stopping the nodes, open the jobs and start the datafeeds from {kib} or with the <> and <> APIs. -- @@ -177,10 +179,10 @@ include::{docdir}/setup/restart-cluster.asciidoc[tag=stop_indexing] include::{docdir}/setup/restart-cluster.asciidoc[tag=stop_ml] + -- -* If you perform a rolling restart, you can also leave your machine learning -jobs running. When you shut down a machine learning node, its jobs automatically -move to another node and restore the model states. This option enables your jobs -to continue running during the shutdown but it puts increased load on the +* If you perform a rolling restart, you can also leave your machine learning +jobs running. When you shut down a machine learning node, its jobs automatically +move to another node and restore the model states. This option enables your jobs +to continue running during the shutdown but it puts increased load on the cluster. -- @@ -191,11 +193,11 @@ include::{docdir}/upgrade/shut-down-node.asciidoc[] -- . *Perform any needed changes.* - + . *Restart the node you changed.* + -- -Start the node and confirm that it joins the cluster by checking the log file or +Start the node and confirm that it joins the cluster by checking the log file or by submitting a `_cat/nodes` request: [source,console] @@ -208,8 +210,8 @@ GET _cat/nodes . 
*Reenable shard allocation.* + -- -Once the node has joined the cluster, remove the -`cluster.routing.allocation.enable` setting to enable shard allocation and start +Once the node has joined the cluster, remove the +`cluster.routing.allocation.enable` setting to enable shard allocation and start using the node: [source,console] diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index d355d64d40d50..e565877f22f5e 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -2,120 +2,33 @@ === Secure settings Some settings are sensitive, and relying on filesystem permissions to protect -their values is not sufficient. For this use case, Elasticsearch provides a -keystore and the `elasticsearch-keystore` tool to manage the settings in the keystore. - -NOTE: All commands here should be run as the user which will run Elasticsearch. +their values is not sufficient. For this use case, {es} provides a +keystore and the <> to +manage the settings in the keystore. IMPORTANT: Only some settings are designed to be read from the keystore. However, -the keystore has no validation to block unsupported settings. -Adding unsupported settings to the keystore will cause {es} -to fail to start. See documentation for each setting to see if it is supported -as part of the keystore. +the keystore has no validation to block unsupported settings. Adding unsupported +settings to the keystore causes {es} to fail to start. To see whether a setting +is supported in the keystore, look for a "Secure" qualifier in the setting +reference. -NOTE: All the modifications to the keystore take affect only after restarting -Elasticsearch. +All the modifications to the keystore take effect only after restarting {es}. -NOTE: The elasticsearch keystore currently only provides obfuscation. In the future, +NOTE: The {es} keystore currently only provides obfuscation. In the future, password protection will be added. These settings, just like the regular ones in the `elasticsearch.yml` config file, need to be specified on each node in the cluster. Currently, all secure settings are node-specific settings that must have the same value on every node. -[float] -[[creating-keystore]] -=== Creating the keystore - -To create the `elasticsearch.keystore`, use the `create` command: - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore create ----------------------------------------------------------------- - -The file `elasticsearch.keystore` will be created alongside `elasticsearch.yml`. - -[float] -[[list-settings]] -=== Listing settings in the keystore - -A list of the settings in the keystore is available with the `list` command: - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore list ----------------------------------------------------------------- - -[float] -[[add-string-to-keystore]] -=== Adding string settings - -Sensitive string settings, like authentication credentials for cloud -plugins, can be added using the `add` command: - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore add the.setting.name.to.set ----------------------------------------------------------------- - -The tool will prompt for the value of the setting.
To pass the value -through stdin, use the `--stdin` flag: - -[source,sh] ----------------------------------------------------------------- -cat /file/containing/setting/value | bin/elasticsearch-keystore add --stdin the.setting.name.to.set ----------------------------------------------------------------- - -[float] -[[add-file-to-keystore]] -=== Adding file settings -You can add sensitive files, like authentication key files for cloud plugins, -using the `add-file` command. Be sure to include your file path as an argument -after the setting name. - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore add-file the.setting.name.to.set /path/example-file.json ----------------------------------------------------------------- - -[float] -[[remove-settings]] -=== Removing settings - -To remove a setting from the keystore, use the `remove` command: - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore remove the.setting.name.to.remove ----------------------------------------------------------------- - -[float] -[[keystore-upgrade]] -=== Upgrading the keystore - -Occasionally, the internal format of the keystore changes. When Elasticsearch is -installed from a package manager, an upgrade of the on-disk keystore to the new -format is done during package upgrade. In other cases, Elasticsearch will -perform such an upgrade during node startup. This requires that Elasticsearch -have write permissions to the directory that contains the keystore. -Alternatively, you can manually perform such an upgrade by using the `upgrade` -command: - -[source,sh] ----------------------------------------------------------------- -bin/elasticsearch-keystore upgrade ----------------------------------------------------------------- - -[float] +[discrete] [[reloadable-secure-settings]] === Reloadable secure settings -Just like the settings values in `elasticsearch.yml`, changes to the -keystore contents are not automatically applied to the running -elasticsearch node. Re-reading settings requires a node restart. -However, certain secure settings are marked as *reloadable*. Such settings -can be re-read and applied on a running node. +Just like the settings values in `elasticsearch.yml`, changes to the keystore +contents are not automatically applied to the running {es} node. Re-reading +settings requires a node restart. However, certain secure settings are marked as +*reloadable*. Such settings can be re-read and applied on a running node. The values of all secure settings, *reloadable* or not, must be identical across all cluster nodes. After making the desired secure settings changes, @@ -126,16 +39,16 @@ using the `bin/elasticsearch-keystore add` command, call: POST _nodes/reload_secure_settings ---- -This API will decrypt and re-read the entire keystore, on every cluster node, -but only the *reloadable* secure settings will be applied. Changes to other -settings will not go into effect until the next restart. Once the call returns, -the reload has been completed, meaning that all internal datastructures dependent -on these settings have been changed. Everything should look as if the settings -had the new value from the start. +This API decrypts and re-reads the entire keystore, on every cluster node, +but only the *reloadable* secure settings are applied. Changes to other +settings do not go into effect until the next restart. 
Once the call returns, +the reload has been completed, meaning that all internal data structures +dependent on these settings have been changed. Everything should look as if the +settings had the new value from the start. -When changing multiple *reloadable* secure settings, modify all of them, on -each cluster node, and then issue a `reload_secure_settings` call, instead -of reloading after each modification. +When changing multiple *reloadable* secure settings, modify all of them on each +cluster node, then issue a `reload_secure_settings` call instead of reloading +after each modification. There are reloadable secure settings for: diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 5d967929dc18d..3066d44c71010 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -22,13 +22,15 @@ To perform a full cluster restart upgrade to {version}: include::disable-shard-alloc.asciidoc[] -- -. *Stop indexing and perform a synced flush.* +. *Stop indexing and perform a flush.* + -- -Performing a <> speeds up shard -recovery. +Performing a <> speeds up shard recovery. -include::synced-flush.asciidoc[] +[source,console] +-------------------------------------------------- +POST /_flush +-------------------------------------------------- -- . *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) @@ -71,7 +73,7 @@ a node. . If you use {es} {security-features} to define realms, verify that your realm settings are up-to-date. The format of realm settings changed in version 7.0, in particular, the placement of the realm type changed. See -<>. +<>. . *Start each upgraded node.* + diff --git a/docs/reference/upgrade/synced-flush.asciidoc b/docs/reference/upgrade/synced-flush.asciidoc deleted file mode 100644 index 59bdea15ba742..0000000000000 --- a/docs/reference/upgrade/synced-flush.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ - -[source,console] --------------------------------------------------- -POST _flush/synced --------------------------------------------------- - -When you perform a synced flush, check the response to make sure there are -no failures. Synced flush operations that fail due to pending indexing -operations are listed in the response body, although the request itself -still returns a 200 OK status. If there are failures, reissue the request. diff --git a/gradle/build-scan.gradle b/gradle/build-scan.gradle index 9bfc26130b95f..7ce2b136a20e6 100644 --- a/gradle/build-scan.gradle +++ b/gradle/build-scan.gradle @@ -3,8 +3,7 @@ import org.gradle.initialization.BuildRequestMetaData import java.util.concurrent.TimeUnit - -long startTime = project.gradle.services.get(BuildRequestMetaData.class).getStartTime() +long startTime = project.gradle.services.get(BuildRequestMetaData).getStartTime() buildScan { URL jenkinsUrl = System.getenv('JENKINS_URL') ? new URL(System.getenv('JENKINS_URL')) : null @@ -14,18 +13,14 @@ buildScan { String nodeName = System.getenv('NODE_NAME') tag OS.current().name() - if (jobName) { - value 'Job name', jobName - } - if (buildNumber) { - value 'Job number', buildNumber - } + // Automatically publish scans from Elasticsearch CI if (jenkinsUrl?.host?.endsWith('elastic.co')) { publishAlways() buildScan.server = 'https://gradle-enterprise.elastic.co' } + // Link to Jenkins worker logs and system metrics if (nodeName) { link 'System logs', "https://infra-stats.elastic.co/app/infra#/logs?" 
+ "&logFilter=(expression:'host.name:${nodeName}',kind:kuery)" @@ -39,10 +34,27 @@ buildScan { // Jenkins-specific build scan metadata if (jenkinsUrl) { + // Parse job name in the case of matrix builds + // Matrix job names come in the form of "base-job-name/matrix_param1=value1,matrix_param2=value2" + def splitJobName = jobName.split('/') + if (splitJobName.length > 1 && splitJobName.last() ==~ /^([a-zA-Z0-9_\-]+=[a-zA-Z0-9_\-]+,?)+$/) { + def baseJobName = splitJobName.dropRight(1).join('/') + tag baseJobName + tag splitJobName.last() + value 'Job Name', baseJobName + def matrixParams = splitJobName.last().split(',') + matrixParams.collect { it.split('=') }.each { param -> + value "MATRIX_${param[0].toUpperCase()}", param[1] + } + } else { + tag jobName + value 'Job Name', jobName + } + tag 'CI' - tag jobName link 'Jenkins Build', buildUrl link 'GCP Upload', "https://console.cloud.google.com/storage/elasticsearch-ci-artifacts/jobs/${jobName}/build/${buildNumber}.tar.bz2" + value 'Job Number', buildNumber System.getenv().getOrDefault('NODE_LABELS', '').split(' ').each { value 'Jenkins Worker Label', it diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ErrorOnUnknown.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ErrorOnUnknown.java new file mode 100644 index 0000000000000..28ad02ff44812 --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ErrorOnUnknown.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import java.util.ServiceLoader; + +/** + * Extension point to customize the error message for unknown fields. We expect + * Elasticsearch to plug a fancy implementation that uses Lucene's spelling + * correction infrastructure to suggest corrections. + */ +public interface ErrorOnUnknown { + /** + * The implementation of this interface that was loaded from SPI. + */ + ErrorOnUnknown IMPLEMENTATION = findImplementation(); + + /** + * Build the error message to use when {@link ObjectParser} encounters an unknown field. + * @param parserName the name of the thing we're parsing + * @param unknownField the field that we couldn't recognize + * @param candidates the possible fields + */ + String errorMessage(String parserName, String unknownField, Iterable candidates); + + /** + * Priority that this error message handler should be used. 
+ */ + int priority(); + + private static ErrorOnUnknown findImplementation() { + ErrorOnUnknown best = new ErrorOnUnknown() { + @Override + public String errorMessage(String parserName, String unknownField, Iterable candidates) { + return "[" + parserName + "] unknown field [" + unknownField + "]"; + } + + @Override + public int priority() { + return Integer.MIN_VALUE; + } + }; + for (ErrorOnUnknown c : ServiceLoader.load(ErrorOnUnknown.class)) { + if (best.priority() < c.priority()) { + best = c; + } + } + return best; + } +} diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 69a4a4bd31e45..478b4c1369463 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -81,18 +81,17 @@ public static BiConsumer> fromLi } private interface UnknownFieldParser { - - void acceptUnknownField(String parserName, String field, XContentLocation location, XContentParser parser, - Value value, Context context) throws IOException; + void acceptUnknownField(ObjectParser objectParser, String field, XContentLocation location, XContentParser parser, + Value value, Context context) throws IOException; } private static UnknownFieldParser ignoreUnknown() { - return (n, f, l, p, v, c) -> p.skipChildren(); + return (op, f, l, p, v, c) -> p.skipChildren(); } private static UnknownFieldParser errorOnUnknown() { - return (n, f, l, p, v, c) -> { - throw new XContentParseException(l, "[" + n + "] unknown field [" + f + "], parser not found"); + return (op, f, l, p, v, c) -> { + throw new XContentParseException(l, ErrorOnUnknown.IMPLEMENTATION.errorMessage(op.name, f, op.fieldParserMap.keySet())); }; } @@ -104,7 +103,7 @@ public interface UnknownFieldConsumer { } private static UnknownFieldParser consumeUnknownField(UnknownFieldConsumer consumer) { - return (parserName, field, location, parser, value, context) -> { + return (objectParser, field, location, parser, value, context) -> { XContentParser.Token t = parser.currentToken(); switch (t) { case VALUE_STRING: @@ -127,7 +126,7 @@ private static UnknownFieldParser consumeUnknow break; default: throw new XContentParseException(parser.getTokenLocation(), - "[" + parserName + "] cannot parse field [" + field + "] with value type [" + t + "]"); + "[" + objectParser.name + "] cannot parse field [" + field + "] with value type [" + t + "]"); } }; } @@ -136,12 +135,13 @@ private static UnknownFieldParser unk Class categoryClass, BiConsumer consumer ) { - return (parserName, field, location, parser, value, context) -> { + return (objectParser, field, location, parser, value, context) -> { Category o; try { o = parser.namedObject(categoryClass, field, context); } catch (NamedObjectNotFoundException e) { - throw new XContentParseException(location, "[" + parserName + "] " + e.getBareMessage(), e); + // TODO It'd be lovely if we could include the options here but we don't have the right stuff plumbed through. We'll get to it!
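+                // Until that is plumbed through, fall back to the exception's bare message without suggesting candidate fields.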
+ throw new XContentParseException(location, "[" + objectParser.name + "] " + e.getBareMessage(), e); } consumer.accept(value, o); }; @@ -278,7 +278,7 @@ public Value parse(XContentParser parser, Value value, Context context) throws I throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); } if (fieldParser == null) { - unknownFieldParser.acceptUnknownField(name, currentFieldName, currentPosition, parser, value, context); + unknownFieldParser.acceptUnknownField(this, currentFieldName, currentPosition, parser, value, context); } else { fieldParser.assertSupports(name, parser, currentFieldName); parseSub(parser, fieldParser, currentFieldName, value, context); diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 96d0f11af7c40..5b52d5b2e120d 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -206,7 +206,7 @@ public void setTest(int test) { { XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"not_supported_field\" : \"foo\"}"); XContentParseException ex = expectThrows(XContentParseException.class, () -> objectParser.parse(parser, s, null)); - assertEquals(ex.getMessage(), "[1:2] [the_parser] unknown field [not_supported_field], parser not found"); + assertEquals(ex.getMessage(), "[1:2] [the_parser] unknown field [not_supported_field]"); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index b9de97952eb4b..ea1c810d6696d 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -337,9 +337,29 @@ public Map> getTokenizers() { tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new); tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); tokenizers.put("thai", ThaiTokenizerFactory::new); - tokenizers.put("nGram", NGramTokenizerFactory::new); + tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(org.elasticsearch.Version.V_8_0_0)) { + throw new IllegalArgumentException("The [nGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [ngram] for indices created in versions 8 or higher instead."); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(org.elasticsearch.Version.V_7_6_0)) { + deprecationLogger.deprecatedAndMaybeLog("nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead."); + } + return new NGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("ngram", NGramTokenizerFactory::new); - tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new); + tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(org.elasticsearch.Version.V_8_0_0)) { + throw new IllegalArgumentException("The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead."); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(org.elasticsearch.Version.V_7_6_0)) { + deprecationLogger.deprecatedAndMaybeLog("edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [edge_ngram] instead."); + } + return new EdgeNGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); @@ -487,7 +507,7 @@ public List getPreConfiguredTokenFilters() { | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, false, (input, version) -> { + filters.add(PreConfiguredTokenFilter.elasticsearchVersion("word_delimiter_graph", false, false, (input, version) -> { boolean adjustOffsets = version.onOrAfter(Version.V_7_3_0); return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, WordDelimiterGraphFilter.GENERATE_WORD_PARTS @@ -522,8 +542,26 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new)); // Temporary shim for aliases. TODO deprecate after they are moved - tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new)); + tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("nGram", (version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_8_0_0)) { + throw new IllegalArgumentException("The [nGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [ngram] for indices created in versions 8 or higher instead."); + } else if (version.onOrAfter(org.elasticsearch.Version.V_7_6_0)) { + deprecationLogger.deprecatedAndMaybeLog("nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead."); + } + return new NGramTokenizer(); + })); tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edgeNGram", (version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_8_0_0)) { + throw new IllegalArgumentException("The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead."); + } else if (version.onOrAfter(org.elasticsearch.Version.V_7_6_0)) { + deprecationLogger.deprecatedAndMaybeLog("edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version.
" + + "Please change the tokenizer name to [edge_ngram] instead."); + } if (version.onOrAfter(Version.V_7_3_0)) { return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index 90190e42f2f85..03200d1966ba3 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -19,19 +19,17 @@ package org.elasticsearch.analysis.common; -import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.io.StringReader; import java.util.Map; public class CommonAnalysisPluginTests extends ESTestCase { @@ -51,13 +49,8 @@ public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException .build(); try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), - settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> tokenFilterFactory.create(tokenizer)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin)); assertEquals("The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + "Please change the filter name to [ngram] instead.", ex.getMessage()); } @@ -69,12 +62,7 @@ public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram").put("index.analysis.filter.my_ngram.type", "nGram") .build(); try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), - settingsPre7, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); assertWarnings("The [nGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [ngram] instead."); } @@ -95,13 +83,8 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep .build(); try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), - settings, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> tokenFilterFactory.create(tokenizer)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin)); assertEquals("The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + "Please change the filter name to [edge_ngram] instead.", ex.getMessage()); } @@ -116,14 +99,88 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep .build(); try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - Map tokenFilters = createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), - settingsPre7, commonAnalysisPlugin).tokenFilter; - TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), + settingsPre7, commonAnalysisPlugin); assertWarnings("The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [edge_ngram] instead."); } } + + /** + * Check that we log a deprecation warning for "nGram" and "edgeNGram" tokenizer names with 7.6 and + * disallow usages for indices created after 8.0 + */ + public void testNGramTokenizerDeprecation() throws IOException { + // tests for prebuilt tokenizer + doTestPrebuiltTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_2), false); + doTestPrebuiltTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_2), false); + doTestPrebuiltTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_6_0, + Version.max(Version.V_7_6_0, VersionUtils.getPreviousVersion(Version.V_8_0_0))), + true); + doTestPrebuiltTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_6_0, + Version.max(Version.V_7_6_0, VersionUtils.getPreviousVersion(Version.V_8_0_0))), true); + expectThrows(IllegalArgumentException.class, () -> doTestPrebuiltTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT), true)); + expectThrows(IllegalArgumentException.class, () -> doTestPrebuiltTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT), true)); + + // same batch of tests for custom tokenizer definition in the settings + doTestCustomTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_2), false); + doTestCustomTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_2), false); + doTestCustomTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_6_0, + Version.max(Version.V_7_6_0, VersionUtils.getPreviousVersion(Version.V_8_0_0))), + true); + doTestCustomTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_7_6_0, + Version.max(Version.V_7_6_0, VersionUtils.getPreviousVersion(Version.V_8_0_0))), true); + expectThrows(IllegalArgumentException.class, () -> doTestCustomTokenizerDeprecation("nGram", "ngram", + VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT), true)); + expectThrows(IllegalArgumentException.class, () -> doTestCustomTokenizerDeprecation("edgeNGram", "edge_ngram", + VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT), true)); + } + + public void doTestPrebuiltTokenizerDeprecation(String deprecatedName, String replacement, Version version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenizers = createTestAnalysis( + IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin).tokenizer; + TokenizerFactory tokenizerFactory = tokenizers.get(deprecatedName); + + Tokenizer tokenizer = tokenizerFactory.create(); + assertNotNull(tokenizer); + if (expectWarning) { + assertWarnings("The [" + deprecatedName + "] tokenizer name is deprecated and will be removed in a future version. 
" + + "Please change the tokenizer name to [" + replacement + "] instead."); + } + } + } + + public void doTestCustomTokenizerDeprecation(String deprecatedName, String replacement, Version version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "my_tokenizer") + .put("index.analysis.tokenizer.my_tokenizer.type", deprecatedName) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin); + + if (expectWarning) { + assertWarnings("The [" + deprecatedName + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + replacement + "] instead."); + } + } + } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 95bf41f8e92c9..e1fbe90647e4b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -86,9 +86,11 @@ public void testPreConfiguredTokenizer() throws IOException { } } - // Check deprecated name as well + // Check deprecated name as well, needs version before 8.0 because throws IAE after that { - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edgeNGram")) { + try (IndexAnalyzers indexAnalyzers = buildAnalyzers( + VersionUtils.randomVersionBetween(random(), Version.V_7_3_0, VersionUtils.getPreviousVersion(Version.V_8_0_0)), + "edgeNGram")) { NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); assertNotNull(analyzer); assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"}); diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml index a012536c53bb4..bc82b7f1ca7e1 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_pipeline_processor.yml @@ -202,3 +202,81 @@ teardown: } - match: { error.root_cause.0.type: "illegal_state_exception" } - match: { error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [legal-department]" } + +--- +"Test _ingest.pipeline metadata": + - do: + ingest.put_pipeline: + id: "pipeline1" + body: > + { + "processors" : [ + { + "append" : { + "field": "pipelines", + "value": "{{_ingest.pipeline}}" + } + }, + { + "pipeline" : { + "name": "another_pipeline" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "another_pipeline" + body: > + { + "processors" : [ + { + "append" : { + "field": "pipelines", + "value": "{{_ingest.pipeline}}" + } + }, + { + "pipeline" : { + "name": "another_pipeline2" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "another_pipeline2" + body: > + { + "processors" : [ + { + "append" : { + "field": "pipelines", + "value": 
"{{_ingest.pipeline}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "pipeline1" + body: > + { + } + + - do: + get: + index: test + id: 1 + - length: { _source.pipelines: 3 } + - match: { _source.pipelines.0: "pipeline1" } + - match: { _source.pipelines.1: "another_pipeline" } + - match: { _source.pipelines.2: "another_pipeline2" } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml index 456a2ba15dd4c..6203326e1c2f9 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -284,26 +284,30 @@ teardown: - length: { docs.0.processor_results.0.doc._source: 2 } - match: { docs.0.processor_results.0.doc._source.foo.bar.0.item: "HELLO" } - match: { docs.0.processor_results.0.doc._source.field2.value: "_value" } - - length: { docs.0.processor_results.0.doc._ingest: 1 } + - length: { docs.0.processor_results.0.doc._ingest: 2 } - is_true: docs.0.processor_results.0.doc._ingest.timestamp + - is_true: docs.0.processor_results.0.doc._ingest.pipeline - length: { docs.0.processor_results.1.doc._source: 3 } - match: { docs.0.processor_results.1.doc._source.foo.bar.0.item: "HELLO" } - match: { docs.0.processor_results.1.doc._source.field2.value: "_value" } - match: { docs.0.processor_results.1.doc._source.field3: "third_val" } - - length: { docs.0.processor_results.1.doc._ingest: 1 } + - length: { docs.0.processor_results.1.doc._ingest: 2 } - is_true: docs.0.processor_results.1.doc._ingest.timestamp + - is_true: docs.0.processor_results.1.doc._ingest.pipeline - length: { docs.0.processor_results.2.doc._source: 3 } - match: { docs.0.processor_results.2.doc._source.foo.bar.0.item: "HELLO" } - match: { docs.0.processor_results.2.doc._source.field2.value: "_VALUE" } - match: { docs.0.processor_results.2.doc._source.field3: "third_val" } - - length: { docs.0.processor_results.2.doc._ingest: 1 } + - length: { docs.0.processor_results.2.doc._ingest: 2 } - is_true: docs.0.processor_results.2.doc._ingest.timestamp + - is_true: docs.0.processor_results.2.doc._ingest.pipeline - length: { docs.0.processor_results.3.doc._source: 3 } - match: { docs.0.processor_results.3.doc._source.foo.bar.0.item: "hello" } - match: { docs.0.processor_results.3.doc._source.field2.value: "_VALUE" } - match: { docs.0.processor_results.3.doc._source.field3: "third_val" } - - length: { docs.0.processor_results.3.doc._ingest: 1 } + - length: { docs.0.processor_results.3.doc._ingest: 2 } - is_true: docs.0.processor_results.3.doc._ingest.timestamp + - is_true: docs.0.processor_results.3.doc._ingest.pipeline --- "Test simulate with exception thrown": @@ -393,12 +397,14 @@ teardown: - match: { docs.1.processor_results.0.doc._index: "index" } - match: { docs.1.processor_results.0.doc._source.foo: 5 } - match: { docs.1.processor_results.0.doc._source.bar: "hello" } - - length: { docs.1.processor_results.0.doc._ingest: 1 } + - length: { docs.1.processor_results.0.doc._ingest: 2 } - is_true: docs.1.processor_results.0.doc._ingest.timestamp + - is_true: docs.1.processor_results.0.doc._ingest.pipeline - match: { docs.1.processor_results.1.doc._source.foo: 5 } - match: { docs.1.processor_results.1.doc._source.bar: "HELLO" } - - length: { docs.1.processor_results.1.doc._ingest: 1 } + - length: { docs.1.processor_results.1.doc._ingest: 2 } - 
is_true: docs.1.processor_results.1.doc._ingest.timestamp + - is_true: docs.1.processor_results.1.doc._ingest.pipeline --- "Test verbose simulate with on_failure": diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java index 04ac52376d4c9..165dbe66f5f66 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java @@ -64,11 +64,11 @@ static int getFrequency(TokenStream tk) throws IOException { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -99,12 +99,12 @@ public void testDefaults() throws Exception { } public void testNegativeScoreImpact() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_feature") .field("positive_score_impact", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -135,12 +135,12 @@ public void testNegativeScoreImpact() throws Exception { } public void testRejectMultiValuedFields() throws MapperParsingException, IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_feature").endObject().startObject("foo") .startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java index f293e10f9e9ea..46c2316aa161e 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java @@ -44,13 +44,13 @@ public void setup() { protected Collection> getPlugins() { return pluginList(MapperExtrasPlugin.class); } - + public void testBasics() throws Exception { - String mapping = 
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); assertNotNull(mapper.metadataMapper(RankFeatureMetaFieldMapper.class)); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java index f6dd518191e2c..ea8f2ed16f81b 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -53,11 +53,11 @@ protected Collection> getPlugins() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_features").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -85,12 +85,12 @@ public void testDefaults() throws Exception { } public void testRejectMultiValuedFields() throws MapperParsingException, IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "rank_features").endObject().startObject("foo") .startObject("properties").startObject("field").field("type", "rank_features").endObject().endObject() .endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index 94ae0cee76bb7..c5c7358c8220f 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -58,12 +58,12 @@ protected Collection> getPlugins() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new 
CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -108,12 +108,12 @@ public void testIllegalScalingFactor() throws IOException { } public void testNotIndexed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("index", false).field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -132,12 +132,12 @@ public void testNotIndexed() throws Exception { } public void testNoDocValues() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("doc_values", false).field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -156,12 +156,12 @@ public void testNoDocValues() throws Exception { } public void testStore() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("store", true).field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -185,12 +185,12 @@ public void testStore() throws Exception { } public void testCoerce() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -209,12 +209,12 @@ public void testCoerce() throws Exception { IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("scaling_factor", 10.0).field("coerce", 
false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper2 = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper2.mappingSource().toString()); @@ -238,12 +238,12 @@ public void testIgnoreMalformed() throws Exception { } private void doTestIgnoreMalformed(String value, String exceptionMessageContains) throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("scaling_factor", 10.0).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -256,12 +256,12 @@ private void doTestIgnoreMalformed(String value, String exceptionMessageContains MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString(exceptionMessageContains)); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "scaled_float") .field("scaling_factor", 10.0).field("ignore_malformed", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper2 = parser.parse("_doc", new CompressedXContent(mapping)); ParsedDocument doc = mapper2.parse(new SourceToParse("test", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -276,7 +276,7 @@ private void doTestIgnoreMalformed(String value, String exceptionMessageContains public void testNullValue() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "scaled_float") @@ -285,7 +285,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -297,7 +297,7 @@ public void testNullValue() throws IOException { assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "scaled_float") @@ -307,7 +307,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); doc = mapper.parse(new SourceToParse("test", "1", BytesReference diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java 
b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 74683d8135ed7..a10459abfdce3 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -143,7 +143,6 @@ public void testXContentParsingIsNotLenient() throws IOException { exception = exception.getCause(); } assertThat(exception.getMessage(), containsString("unknown field")); - assertThat(exception.getMessage(), containsString("parser not found")); } } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index e4db71bc07bb7..630113daf621f 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -64,11 +64,11 @@ public void setup() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -105,11 +105,11 @@ public void testDefaults() throws Exception { } public void testNullValue() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -120,12 +120,12 @@ public void testNullValue() throws IOException { XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("null_value", "1234").endObject().endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -155,12 +155,12 @@ public void testNullValue() throws IOException { } public void testEnableStore() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", 
FIELD_TYPE) .field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -177,12 +177,12 @@ public void testEnableStore() throws IOException { } public void testDisableIndex() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -200,12 +200,12 @@ public void testDisableIndex() throws IOException { } public void testDisableDocValues() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -222,11 +222,11 @@ public void testDisableDocValues() throws IOException { } public void testMultipleValues() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -284,12 +284,12 @@ public void testMultipleValues() throws IOException { } public void testIndexOptions() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("index_options", "freqs").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -317,12 +317,12 @@ public void testIndexOptions() throws IOException { } public void testEnableNorms() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("norms", true).endObject().endObject() 
.endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -339,14 +339,14 @@ public void testEnableNorms() throws IOException { } public void testCollator() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", FIELD_TYPE) .field("language", "tr") .field("strength", "primary") .endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -406,12 +406,12 @@ public void testUpdateCollator() throws IOException { public void testIgnoreAbove() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", FIELD_TYPE) .field("ignore_above", 5).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/plugins/examples/custom-significance-heuristic/build.gradle similarity index 64% rename from server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java rename to plugins/examples/custom-significance-heuristic/build.gradle index d5180c799ade7..4aebd4ac31150 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java +++ b/plugins/examples/custom-significance-heuristic/build.gradle @@ -16,18 +16,13 @@ * specific language governing permissions and limitations * under the License. 
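Aside on the mapper test hunks above: the mappings are now registered under the "_doc" type name instead of an arbitrary custom type, matching the removal of mapping types. A minimal sketch of the equivalent mapping JSON, written as plain Java so it compiles without the Elasticsearch test fixtures; the builder calls and the parser.parse("_doc", ...) usage are as shown in the hunks above, everything else here is illustrative only.

// Illustrative sketch only: the JSON shape the scaled_float and ICU collation mapper tests
// build under the single "_doc" type. Plain Java, no Elasticsearch dependencies.
public class DocMappingShapeDemo {
    public static void main(String[] args) {
        String mapping = "{\"_doc\":{\"properties\":{\"field\":"
            + "{\"type\":\"scaled_float\",\"scaling_factor\":10.0}}}}";
        // In the tests this string is produced with XContentFactory.jsonBuilder() and then
        // handed to parser.parse("_doc", new CompressedXContent(mapping)).
        System.out.println(mapping);
    }
}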
*/ +apply plugin: 'elasticsearch.testclusters' +apply plugin: 'elasticsearch.esplugin' -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionType; - - -public class SyncedFlushAction extends ActionType { - - public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); - public static final String NAME = "indices:admin/synced_flush"; - - private SyncedFlushAction() { - super(NAME, SyncedFlushResponse::new); - } +esplugin { + name 'custom-significance-heuristic' + description 'An example plugin showing how to write and register a custom significance heuristic' + classname 'org.elasticsearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } diff --git a/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java b/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java new file mode 100644 index 0000000000000..f0e5ae2a313e7 --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.example.customsigheuristic; + +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; + +import java.util.List; + +import static java.util.Collections.singletonList; + +/** + * Plugin declaring a custom {@link SignificanceHeuristic}. + */ +public class CustomSignificanceHeuristicPlugin extends Plugin implements SearchPlugin { + @Override + public List> getSignificanceHeuristics() { + return singletonList(new SignificanceHeuristicSpec<>(SimpleHeuristic.NAME, SimpleHeuristic::new, SimpleHeuristic.PARSER)); + } +} diff --git a/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristic.java b/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristic.java new file mode 100644 index 0000000000000..0f32330190f37 --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/main/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristic.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.example.customsigheuristic; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; + +import java.io.IOException; + +/** + * A simple {@linkplain SignificanceHeuristic} used an example of declaring a custom heuristic. + */ +public class SimpleHeuristic extends SignificanceHeuristic { + public static final String NAME = "simple"; + public static final ObjectParser PARSER = new ObjectParser<>(NAME, SimpleHeuristic::new); + + public SimpleHeuristic() { + } + + /** + * Read from a stream. + */ + public SimpleHeuristic(StreamInput in) throws IOException { + // Nothing to read + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Nothing to write + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME).endObject(); + return builder; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + return true; + } + + /** + * @param subsetFreq The frequency of the term in the selected sample + * @param subsetSize The size of the selected sample (typically number of docs) + * @param supersetFreq The frequency of the term in the superset from which the sample was taken + * @param supersetSize The size of the superset from which the sample was taken (typically number of docs) + * @return a "significance" score + */ + @Override + public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { + return subsetFreq / subsetSize > supersetFreq / supersetSize ? 
2.0 : 1.0; + } +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java similarity index 50% rename from server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java rename to plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java index aee7c4688bb6c..b9ed2dda74d83 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java +++ b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java @@ -17,25 +17,20 @@ * under the License. */ -package org.elasticsearch.action.admin.indices.flush; +package org.elasticsearch.example.customsigheuristic; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; -public class SyncedFlushRequestBuilder extends ActionRequestBuilder { - - public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) { - super(client, action, new SyncedFlushRequest()); - } - - public SyncedFlushRequestBuilder setIndices(String[] indices) { - super.request().indices(indices); - return this; +public class CustomSignificanceHeuristicClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + public CustomSignificanceHeuristicClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); } - public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - super.request().indicesOptions(indicesOptions); - return this; + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); } } diff --git a/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java new file mode 100644 index 0000000000000..e43ed8be5ba5a --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
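Aside on SimpleHeuristic.getScore as defined above: its parameters are longs, so each division truncates toward zero before the comparison. With typical doc counts the left side is 1 only when the term occurs in every document of the bucket, which is consistent with the 2.0/1.0 scores asserted in the REST tests added later in this patch. A minimal, self-contained sketch of that behavior; the numbers are illustrative and not taken from the patch.

// Illustrative sketch only: restates the ternary from SimpleHeuristic.getScore to show the
// effect of long (integer) division. Values below are made up for demonstration.
public class SimpleHeuristicScoreDemo {
    static double score(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
        // Same expression as the plugin: each side truncates toward zero before '>' is applied.
        return subsetFreq / subsetSize > supersetFreq / supersetSize ? 2.0 : 1.0;
    }

    public static void main(String[] args) {
        System.out.println(score(3, 3, 4, 7));   // 1 > 0  -> 2.0 (term in every doc of the bucket)
        System.out.println(score(1, 3, 5, 7));   // 0 > 0  -> 1.0
        System.out.println(score(2, 5, 1, 100)); // 0 > 0  -> 1.0, even though 0.4 > 0.01 as ratios
    }
}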
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.example.customsigheuristic; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class SimpleHeuristicWireTests extends AbstractSerializingTestCase { + @Override + protected SimpleHeuristic doParseInstance(XContentParser parser) throws IOException { + /* Because Heuristics are XContent "fragments" we need to throw away + * the "extra" stuff before calling the parser. */ + parser.nextToken(); + assertThat(parser.currentToken(), equalTo(Token.START_OBJECT)); + parser.nextToken(); + assertThat(parser.currentToken(), equalTo(Token.FIELD_NAME)); + assertThat(parser.currentName(), equalTo("simple")); + parser.nextToken(); + SimpleHeuristic h = SimpleHeuristic.PARSER.apply(parser, null); + assertThat(parser.currentToken(), equalTo(Token.END_OBJECT)); + parser.nextToken(); + return h; + } + + @Override + protected Reader instanceReader() { + return SimpleHeuristic::new; + } + + @Override + protected SimpleHeuristic createTestInstance() { + return new SimpleHeuristic(); + } +} diff --git a/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/10_basic.yml b/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/10_basic.yml new file mode 100644 index 0000000000000..620aa393a6b5b --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/10_basic.yml @@ -0,0 +1,16 @@ +# tests that the custom suggester plugin is installed +--- +"plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains + + # Get master node id + - do: + cluster.state: {} + - set: { master_node: master } + + - do: + nodes.info: {} + + - contains: { nodes.$master.plugins: { name: custom-significance-heuristic } } diff --git a/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/20_custom_heuristic.yml b/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/20_custom_heuristic.yml new file mode 100644 index 0000000000000..262c91403bfdb --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/test/resources/rest-api-spec/test/custom-significance-heuristic/20_custom_heuristic.yml @@ -0,0 +1,121 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + fielddata: true + long: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {"_id": "1"}}' + - '{"text": "foo", "long": 1, "class": 1}' + - '{"index": {"_id": "2"}}' + - '{"text": "foo", "long": 1, "class": 1}' + - '{"index": {"_id": "3"}}' + - '{"text": "bar", "long": 0, "class": 0}' + - '{"index": {"_id": "4"}}' + - 
'{"text": "bar", "long": 0, "class": 0}' + - '{"index": {"_id": "5"}}' + - '{"text": ["foo", "bar"], "long": [1, 0], "class": 1}' + - '{"index": {"_id": "6"}}' + - '{"text": ["foo", "bar"], "long": [1, 0], "class": 0}' + - '{"index": {"_id": "7"}}' + - '{"text": "bar", "long": 0, "class": 0}' + +--- +"test custom heuristic on significant_text": + - do: + search: + index: test + size: 0 + body: + aggs: + class: + terms: + field: class + order: { _key: asc } + aggs: + sig: + significant_text: + field: text + simple: {} + min_doc_count: 1 + + - match: { aggregations.class.buckets.0.key: 0 } + - match: { aggregations.class.buckets.0.sig.buckets.0.key: bar } + - match: { aggregations.class.buckets.0.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.0.sig.buckets.1.key: foo } + - match: { aggregations.class.buckets.0.sig.buckets.1.score: 1.0 } + - match: { aggregations.class.buckets.1.key: 1 } + - match: { aggregations.class.buckets.1.sig.buckets.0.key: foo } + - match: { aggregations.class.buckets.1.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.1.sig.buckets.1.key: bar } + - match: { aggregations.class.buckets.1.sig.buckets.1.score: 1.0 } + +--- +"test custom heuristic on text significant_terms": + - do: + search: + index: test + size: 0 + body: + aggs: + class: + terms: + field: class + order: { _key: asc } + aggs: + sig: + significant_terms: + field: text + simple: {} + min_doc_count: 1 + + - match: { aggregations.class.buckets.0.key: 0 } + - match: { aggregations.class.buckets.0.sig.buckets.0.key: bar } + - match: { aggregations.class.buckets.0.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.0.sig.buckets.1.key: foo } + - match: { aggregations.class.buckets.0.sig.buckets.1.score: 1.0 } + - match: { aggregations.class.buckets.1.key: 1 } + - match: { aggregations.class.buckets.1.sig.buckets.0.key: foo } + - match: { aggregations.class.buckets.1.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.1.sig.buckets.1.key: bar } + - match: { aggregations.class.buckets.1.sig.buckets.1.score: 1.0 } + +--- +"test custom heuristic on long significant_terms": + - do: + search: + index: test + size: 0 + body: + aggs: + class: + terms: + field: class + order: { _key: asc } + aggs: + sig: + significant_terms: + field: long + simple: {} + min_doc_count: 1 + + - match: { aggregations.class.buckets.0.key: 0 } + - match: { aggregations.class.buckets.0.sig.buckets.0.key: 0 } + - match: { aggregations.class.buckets.0.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.0.sig.buckets.1.key: 1 } + - match: { aggregations.class.buckets.0.sig.buckets.1.score: 1.0 } + - match: { aggregations.class.buckets.1.key: 1 } + - match: { aggregations.class.buckets.1.sig.buckets.0.key: 1 } + - match: { aggregations.class.buckets.1.sig.buckets.0.score: 2.0 } + - match: { aggregations.class.buckets.1.sig.buckets.1.key: 0 } + - match: { aggregations.class.buckets.1.sig.buckets.1.score: 1.0 } diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index 14f4d4f8ba326..b459c91283585 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -64,7 +64,7 @@ public ExampleRescoreBuilder(float factor, @Nullable String factorField) { 
this.factorField = factorField; } - ExampleRescoreBuilder(StreamInput in) throws IOException { + public ExampleRescoreBuilder(StreamInput in) throws IOException { super(in); factor = in.readFloat(); factorField = in.readOptionalString(); diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java index 5acc8c9a82280..885bf834ab23d 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java @@ -252,11 +252,11 @@ public void testAgainstTermVectorsAPI() throws IOException { // ===== Code below copied from TextFieldMapperTests ======== public void testDefaults() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -284,11 +284,11 @@ public void testDefaults() throws IOException { } public void testEnableStore() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", getFieldType()).field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -305,14 +305,14 @@ public void testEnableStore() throws IOException { } public void testDisableNorms() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", getFieldType()) .field("norms", false) .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -360,11 +360,11 @@ public void testIndexOptions() throws IOException { } public void testDefaultPositionIncrementGap() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = indexService.mapperService().merge("type", + DocumentMapper mapper = indexService.mapperService().merge("_doc", new 
CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); @@ -399,14 +399,14 @@ public void testDefaultPositionIncrementGap() throws IOException { public void testPositionIncrementGap() throws IOException { final int positionIncrementGap = randomIntBetween(1, 1000); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", getFieldType()) .field("position_increment_gap", positionIncrementGap) .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = indexService.mapperService().merge("type", + DocumentMapper mapper = indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); @@ -440,7 +440,7 @@ public void testPositionIncrementGap() throws IOException { } public void testSearchAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -449,11 +449,11 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -462,10 +462,10 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -473,11 +473,11 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -486,17 +486,17 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new 
CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) .field("analyzer", "keyword") .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); @@ -510,7 +510,7 @@ public void testSearchAnalyzerSerialization() throws IOException { } public void testSearchQuoteAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -520,11 +520,11 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index/search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", getFieldType()) @@ -534,7 +534,7 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 21ae48e9c77d4..427383d7d8292 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; @@ -679,16 +678,10 @@ public void testRecovery() throws Exception { flushRequest.addParameter("force", "true"); flushRequest.addParameter("wait_if_ongoing", "true"); assertOK(client().performRequest(flushRequest)); - if (randomBoolean()) { - // We had a bug before where we failed to perform peer recovery with sync_id from 5.x to 6.x. - // We added this synced flush so we can exercise different paths of recovery code. 
- try { - client().performRequest(new Request("POST", index + "/_flush/synced")); - } catch (ResponseException ignored) { - // synced flush is optional here - } + syncedFlush(index); } + if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog indexRandomDocuments( diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 3acefce8e4e8b..d951c4d0c56eb 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -21,13 +21,17 @@ import org.apache.http.HttpHost; import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; @@ -38,6 +42,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; public class IndexingIT extends ESRestTestCase { @@ -274,6 +279,57 @@ public void testUpdateSnapshotStatus() throws Exception { request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } + public void testSyncedFlushTransition() throws Exception { + Nodes nodes = buildNodeAndVersions(); + assertTrue("bwc version is on 7.x", nodes.getBWCVersion().before(Version.V_8_0_0)); + assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); + assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); + // Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes + String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + int numShards = randomIntBetween(1, 10); + int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); + int totalShards = numShards * (numOfReplicas + 1); + final String index = "test_synced_flush"; + createIndex(index, Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put("index.routing.allocation.include._name", newNodes).build()); + ensureGreen(index); + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient oldNodeClient = buildClient(restClientSettings(), + nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + assertBusy(() -> { + ResponseException responseException = expectThrows(ResponseException.class, () -> oldNodeClient.performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(responseException.getResponse().getWarnings(), + contains("Synced flush is deprecated and will be removed in 8.0. 
Use flush at _/flush or /{index}/_flush instead.")); + Map result = ObjectPath.createFromResponse(responseException.getResponse()).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(0)); + assertThat(result.get("failed"), equalTo(totalShards)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient newNodeClient = buildClient(restClientSettings(), + nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + List warningMsg = List.of("Synced flush was removed and a normal flush was performed instead. " + + "This transition will be removed in a future version."); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(warningMsg) == false)); + assertBusy(() -> { + Map result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(totalShards)); + assertThat(result.get("failed"), equalTo(0)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + } + private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { Request request = new Request("GET", index + "/_count"); request.addParameter("preference", preference); diff --git a/qa/os/bats/default/10_basic.bats b/qa/os/bats/default/10_basic.bats deleted file mode 100644 index b3683b5dd158e..0000000000000 --- a/qa/os/bats/default/10_basic.bats +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bats - -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License; -# you may not use this file except in compliance with the Elastic License. - -# This file is used to test the X-Pack package. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It removes the 'elasticsearch' -# user/group and also many directories. Do not execute this file -# unless you know exactly what you are doing. - -load $BATS_UTILS/utils.bash -load $BATS_UTILS/tar.bash -load $BATS_UTILS/plugins.bash -load $BATS_UTILS/xpack.bash - -setup() { - skip_not_tar_gz - export ESHOME=/tmp/elasticsearch - export PACKAGE_NAME="elasticsearch" - export_elasticsearch_paths - export ESPLUGIN_COMMAND_USER=elasticsearch -} - -@test "[X-PACK] install default distribution" { - # Cleans everything for the 1st execution - clean_before_test - - # Install the archive - install_archive - set_debug_logging -} - -@test "[X-PACK] verify x-pack installation" { - verify_xpack_installation -} - -@test "[X-PACK] verify croneval works" { - run $ESHOME/bin/elasticsearch-croneval "0 0 20 ? * MON-THU" -c 2 - [ "$status" -eq 0 ] - [[ "$output" == *"Valid!"* ]] || { - echo "Expected output message to contain [Valid!] 
but found: $output" - false - } -} diff --git a/qa/os/bats/default/40_tar_certgen.bats b/qa/os/bats/default/40_tar_certgen.bats deleted file mode 120000 index c9a929d829edd..0000000000000 --- a/qa/os/bats/default/40_tar_certgen.bats +++ /dev/null @@ -1 +0,0 @@ -certgen.bash \ No newline at end of file diff --git a/qa/os/bats/default/45_package_certgen.bats b/qa/os/bats/default/45_package_certgen.bats deleted file mode 120000 index c9a929d829edd..0000000000000 --- a/qa/os/bats/default/45_package_certgen.bats +++ /dev/null @@ -1 +0,0 @@ -certgen.bash \ No newline at end of file diff --git a/qa/os/bats/default/certgen.bash b/qa/os/bats/default/certgen.bash deleted file mode 100644 index 0f637f7c34090..0000000000000 --- a/qa/os/bats/default/certgen.bash +++ /dev/null @@ -1,436 +0,0 @@ -#!/usr/bin/env bats - -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License; -# you may not use this file except in compliance with the Elastic License. - -load $BATS_UTILS/utils.bash -load $BATS_UTILS/plugins.bash -load $BATS_UTILS/xpack.bash - -# Description of the nodes instances -instances="/tmp/instances.yml" - -# Destination for generated certificates -certificates="/tmp/certificates.zip" - -setup() { - if [ $BATS_TEST_NUMBER == 1 ]; then - clean_before_test - fi -} - -DEFAULT_ARCHIVE_USER=elasticsearch -DEFAULT_ARCHIVE_ESHOME="/tmp/elasticsearch" -DEFAULT_ARCHIVE_UTILS=$BATS_UTILS/tar.bash - -DEFAULT_PACKAGE_USER=root -DEFAULT_PACKAGE_ESHOME="/usr/share/elasticsearch" -DEFAULT_PACKAGE_UTILS=$BATS_UTILS/packages.bash - -if [[ "$BATS_TEST_FILENAME" =~ 40_tar_certgen.bats$ ]]; then - GROUP='TAR CERTGEN' - - MASTER_USER=$DEFAULT_ARCHIVE_USER - MASTER_GROUP=$DEFAULT_ARCHIVE_USER - MASTER_DPERMS=755 - MASTER_HOME=$DEFAULT_ARCHIVE_ESHOME - MASTER_UTILS=$DEFAULT_ARCHIVE_UTILS - - DATA_USER=$DEFAULT_PACKAGE_USER - DATA_GROUP=elasticsearch - DATA_DPERMS=2755 - DATA_HOME=$DEFAULT_PACKAGE_ESHOME - DATA_UTILS=$DEFAULT_PACKAGE_UTILS - - install_master_node() { - install_node_using_archive - } - start_master_node() { - start_node_using_archive - } - install_data_node() { - install_node_using_package - } - start_data_node() { - start_node_using_package - } -else - if is_rpm; then - GROUP='RPM CERTGEN' - elif is_dpkg; then - GROUP='DEB CERTGEN' - fi - - MASTER_USER=$DEFAULT_PACKAGE_USER - MASTER_GROUP=elasticsearch - MASTER_DPERMS=2755 - MASTER_HOME=$DEFAULT_PACKAGE_ESHOME - MASTER_UTILS=$DEFAULT_PACKAGE_UTILS - - DATA_USER=$DEFAULT_ARCHIVE_USER - DATA_GROUP=$DEFAULT_ARCHIVE_USER - DATA_DPERMS=755 - DATA_HOME=$DEFAULT_ARCHIVE_ESHOME - DATA_UTILS=$DEFAULT_ARCHIVE_UTILS - - install_master_node() { - install_node_using_package - } - start_master_node() { - start_node_using_package - } - install_data_node() { - install_node_using_archive - } - start_data_node() { - start_node_using_archive - } -fi - -# Install a node with x-pack using the archive file -install_node_using_archive() { - load $BATS_UTILS/tar.bash - export ESHOME="$DEFAULT_ARCHIVE_ESHOME" - export_elasticsearch_paths - - assert_file_not_exist "/home/elasticsearch" - install_archive - set_debug_logging - verify_archive_installation - assert_file_not_exist "/home/elasticsearch" - - export ESPLUGIN_COMMAND_USER=$DEFAULT_ARCHIVE_USER - generate_trial_license - verify_xpack_installation -} - -# Starts a node installed using the archive -start_node_using_archive() { - load $BATS_UTILS/tar.bash - export ESHOME="$DEFAULT_ARCHIVE_ESHOME" - export_elasticsearch_paths - - 
run sudo -u $DEFAULT_ARCHIVE_USER "$ESHOME/bin/elasticsearch" -d -p $ESHOME/elasticsearch.pid - [ "$status" -eq "0" ] || { - echo "Failed to start node using archive: $output" - false - } -} - -# Install a node with x-pack using a package file -install_node_using_package() { - load $BATS_UTILS/packages.bash - export ESHOME="$DEFAULT_PACKAGE_ESHOME" - export_elasticsearch_paths - - assert_file_not_exist "/home/elasticsearch" - install_package - set_debug_logging - verify_package_installation - - export ESPLUGIN_COMMAND_USER=$DEFAULT_PACKAGE_USER - generate_trial_license - verify_xpack_installation -} - -# Starts a node installed using a package -start_node_using_package() { - if is_systemd; then - run systemctl daemon-reload - [ "$status" -eq 0 ] - - run sudo systemctl start elasticsearch.service - [ "$status" -eq "0" ] - - elif is_sysvinit; then - run sudo service elasticsearch start - [ "$status" -eq "0" ] - fi -} - - -@test "[$GROUP] install master node" { - install_master_node -} - -@test "[$GROUP] add bootstrap password" { - load $MASTER_UTILS - export ESHOME="$MASTER_HOME" - export_elasticsearch_paths - - # For the sake of simplicity we use a bootstrap password in this test. The - # alternative would be to start the master node, use - # elasticsearch-setup-passwords and restart the node once ssl/tls is - # configured. Or use elasticsearch-setup-passwords over HTTPS with the right - # cacerts imported into a Java keystore. - run sudo -E -u $MASTER_USER bash <<"NEW_PASS" -if [[ ! -f $ESCONFIG/elasticsearch.keystore ]]; then - $ESHOME/bin/elasticsearch-keystore create -fi -echo "changeme" | $ESHOME/bin/elasticsearch-keystore add --stdin bootstrap.password -NEW_PASS - [ "$status" -eq 0 ] || { - echo "Expected elasticsearch-keystore tool exit code to be zero" - echo "$output" - false - } -} - -@test "[$GROUP] create instances file" { - rm -f /tmp/instances.yml - run sudo -E -u $MASTER_USER bash <<"CREATE_INSTANCES_FILE" -cat > /tmp/instances.yml <<- EOF -instances: - - name: "node-master" - ip: - - "127.0.0.1" - - name: "node-data" - ip: - - "127.0.0.1" -EOF -CREATE_INSTANCES_FILE - - [ "$status" -eq 0 ] || { - echo "Failed to create instances file [$instances]: $output" - false - } -} - -@test "[$GROUP] create certificates" { - if [[ -f "$certificates" ]]; then - sudo rm -f "$certificates" - fi - - run sudo -E -u $MASTER_USER "$MASTER_HOME/bin/elasticsearch-certgen" --in "$instances" --out "$certificates" - [ "$status" -eq 0 ] || { - echo "Expected elasticsearch-certgen tool exit code to be zero" - echo "$output" - false - } - - echo "$output" | grep "Certificates written to $certificates" - assert_file "$certificates" f $MASTER_USER $MASTER_USER 600 -} - -@test "[$GROUP] install certificates on master node" { - load $MASTER_UTILS - export ESHOME="$MASTER_HOME" - export_elasticsearch_paths - - certs="$ESCONFIG/certs" - if [[ -d "$certs" ]]; then - sudo rm -rf "$certs" - fi - - run sudo -E -u $MASTER_USER "unzip" $certificates -d $certs - [ "$status" -eq 0 ] || { - echo "Failed to unzip certificates in $certs: $output" - false - } - - assert_file "$certs/ca/ca.key" f $MASTER_USER $MASTER_GROUP 644 - assert_file "$certs/ca/ca.crt" f $MASTER_USER $MASTER_GROUP 644 - - assert_file "$certs/node-master" d $MASTER_USER $MASTER_GROUP $MASTER_DPERMS - assert_file "$certs/node-master/node-master.key" f $MASTER_USER $MASTER_GROUP 644 - assert_file "$certs/node-master/node-master.crt" f $MASTER_USER $MASTER_GROUP 644 - - assert_file "$certs/node-data" d $MASTER_USER $MASTER_GROUP $MASTER_DPERMS - 
assert_file "$certs/node-data/node-data.key" f $MASTER_USER $MASTER_GROUP 644 - assert_file "$certs/node-data/node-data.crt" f $MASTER_USER $MASTER_GROUP 644 -} - -@test "[$GROUP] update master node settings" { - load $MASTER_UTILS - export ESHOME="$MASTER_HOME" - export_elasticsearch_paths - - run sudo -E -u $MASTER_USER bash <<"MASTER_SETTINGS" -cat >> $ESCONFIG/elasticsearch.yml <<- EOF -node.name: "node-master" -node.master: true -node.data: false -discovery.seed_hosts: ["127.0.0.1:9301"] -cluster.initial_master_nodes: ["node-master"] - -xpack.security.transport.ssl.key: $ESCONFIG/certs/node-master/node-master.key -xpack.security.transport.ssl.certificate: $ESCONFIG/certs/node-master/node-master.crt -xpack.security.transport.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] -xpack.security.http.ssl.key: $ESCONFIG/certs/node-master/node-master.key -xpack.security.http.ssl.certificate: $ESCONFIG/certs/node-master/node-master.crt -xpack.security.http.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] - -xpack.security.transport.ssl.enabled: true -transport.port: 9300 - -xpack.security.http.ssl.enabled: true -http.port: 9200 - -EOF -MASTER_SETTINGS - - start_master_node - wait_for_xpack 127.0.0.1 9200 -} - -@test "[$GROUP] test connection to master node using HTTPS" { - load $MASTER_UTILS - export ESHOME="$MASTER_HOME" - export_elasticsearch_paths - - run sudo -E -u $MASTER_USER curl -u "elastic:changeme" --cacert "$ESCONFIG/certs/ca/ca.crt" -XGET "https://127.0.0.1:9200" - [ "$status" -eq 0 ] || { - echo "Failed to connect to master node using HTTPS:" - echo "$output" - debug_collect_logs - false - } - echo "$output" | grep "node-master" -} - -@test "[$GROUP] install data node" { - install_data_node -} - -@test "[$GROUP] install certificates on data node" { - load $DATA_UTILS - export ESHOME="$DATA_HOME" - export_elasticsearch_paths - - sudo chown $DATA_USER:$DATA_USER "$certificates" - [ -f "$certificates" ] || { - echo "Could not find certificates: $certificates" - false - } - - certs="$ESCONFIG/certs" - if [[ -d "$certs" ]]; then - sudo rm -rf "$certs" - fi - - run sudo -E -u $DATA_USER "unzip" $certificates -d $certs - [ "$status" -eq 0 ] || { - echo "Failed to unzip certificates in $certs: $output" - false - } - - assert_file "$certs/ca" d $DATA_USER $DATA_GROUP - assert_file "$certs/ca/ca.key" f $DATA_USER $DATA_GROUP 644 - assert_file "$certs/ca/ca.crt" f $DATA_USER $DATA_GROUP 644 - - assert_file "$certs/node-master" d $DATA_USER $DATA_GROUP - assert_file "$certs/node-master/node-master.key" f $DATA_USER $DATA_GROUP 644 - assert_file "$certs/node-master/node-master.crt" f $DATA_USER $DATA_GROUP 644 - - assert_file "$certs/node-data" d $DATA_USER $DATA_GROUP - assert_file "$certs/node-data/node-data.key" f $DATA_USER $DATA_GROUP 644 - assert_file "$certs/node-data/node-data.crt" f $DATA_USER $DATA_GROUP 644 -} - -@test "[$GROUP] update data node settings" { - load $DATA_UTILS - export ESHOME="$DATA_HOME" - export_elasticsearch_paths - - run sudo -E -u $DATA_USER bash <<"DATA_SETTINGS" -cat >> $ESCONFIG/elasticsearch.yml <<- EOF -node.name: "node-data" -node.master: false -node.data: true -discovery.seed_hosts: ["127.0.0.1:9300"] - -xpack.security.transport.ssl.key: $ESCONFIG/certs/node-data/node-data.key -xpack.security.transport.ssl.certificate: $ESCONFIG/certs/node-data/node-data.crt -xpack.security.transport.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] -xpack.security.http.ssl.key: $ESCONFIG/certs/node-data/node-data.key 
-xpack.security.http.ssl.certificate: $ESCONFIG/certs//node-data/node-data.crt -xpack.security.http.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] - -xpack.security.transport.ssl.enabled: true -transport.port: 9301 - -xpack.security.http.ssl.enabled: true -http.port: 9201 - -EOF -DATA_SETTINGS - - start_data_node - wait_for_xpack 127.0.0.1 9201 -} - -@test "[$GROUP] test connection to data node using HTTPS" { - load $DATA_UTILS - export ESHOME="$DATA_HOME" - export_elasticsearch_paths - - run sudo -E -u $DATA_USER curl --cacert "$ESCONFIG/certs/ca/ca.crt" -XGET "https://127.0.0.1:9201" - [ "$status" -eq 0 ] || { - echo "Failed to connect to data node using HTTPS:" - echo "$output" - false - } - echo "$output" | grep "missing authentication credentials" -} - -@test "[$GROUP] test node to node communication" { - load $MASTER_UTILS - export ESHOME="$MASTER_HOME" - export_elasticsearch_paths - - testIndex=$(sudo curl -u "elastic:changeme" \ - -H "Content-Type: application/json" \ - --cacert "$ESCONFIG/certs/ca/ca.crt" \ - -XPOST "https://127.0.0.1:9200/books/_doc/0?refresh" \ - -d '{"title": "Elasticsearch The Definitive Guide"}') - - debug_collect_logs - echo "$testIndex" | grep '"result":"created"' - - masterSettings=$(sudo curl -u "elastic:changeme" \ - -H "Content-Type: application/json" \ - --cacert "$ESCONFIG/certs/ca/ca.crt" \ - -XGET "https://127.0.0.1:9200/_nodes/node-master?filter_path=nodes.*.settings.xpack,nodes.*.settings.http.type,nodes.*.settings.transport.type") - - echo "$masterSettings" | grep '"http":{"ssl":{"enabled":"true"}' - echo "$masterSettings" | grep '"http":{"type":"security4"}' - echo "$masterSettings" | grep '"transport":{"ssl":{"enabled":"true"}' - echo "$masterSettings" | grep '"transport":{"type":"security4"}' - - load $DATA_UTILS - export ESHOME="$DATA_HOME" - export_elasticsearch_paths - - dataSettings=$(curl -u "elastic:changeme" \ - -H "Content-Type: application/json" \ - --cacert "$ESCONFIG/certs/ca/ca.crt" \ - -XGET "https://127.0.0.1:9200/_nodes/node-data?filter_path=nodes.*.settings.xpack,nodes.*.settings.http.type,nodes.*.settings.transport.type") - - echo "$dataSettings" | grep '"http":{"ssl":{"enabled":"true"}' - echo "$dataSettings" | grep '"http":{"type":"security4"}' - echo "$dataSettings" | grep '"transport":{"ssl":{"enabled":"true"}' - echo "$dataSettings" | grep '"transport":{"type":"security4"}' - - testSearch=$(curl -u "elastic:changeme" \ - -H "Content-Type: application/json" \ - --cacert "$ESCONFIG/certs/ca/ca.crt" \ - -XGET "https://127.0.0.1:9200/_search?q=title:guide") - - echo "$testSearch" | grep '"_index":"books"' - echo "$testSearch" | grep '"_id":"0"' -} - -@test "[$GROUP] exit code on failure" { - run sudo -E -u $MASTER_USER "$MASTER_HOME/bin/elasticsearch-certgen" --not-a-valid-option - [ "$status" -ne 0 ] || { - echo "Expected elasticsearch-certgen tool exit code to be non-zero" - echo "$output" - false - } -} - -@test "[$GROUP] remove Elasticsearch" { - # NOTE: this must be the last test, so that running oss tests does not already have the default distro still installed - clean_before_test -} diff --git a/qa/os/build.gradle b/qa/os/build.gradle index fc91e77606219..204ff38bce349 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -58,7 +58,7 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses() tasks.register('destructivePackagingTest') { - dependsOn 'destructiveDistroTest', 'destructiveBatsTest.oss', 'destructiveBatsTest.default' + dependsOn 'destructiveDistroTest', 
'destructiveBatsTest.oss' } processTestResources { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java new file mode 100644 index 0000000000000..1893e5f682f7d --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.FileUtils; +import org.elasticsearch.packaging.util.Platforms; +import org.elasticsearch.packaging.util.ServerUtils; +import org.elasticsearch.packaging.util.Shell; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeFalse; +import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File; +import static org.elasticsearch.packaging.util.FileMatcher.file; +import static org.elasticsearch.packaging.util.FileMatcher.p600; +import static org.elasticsearch.packaging.util.FileUtils.append; +import static org.elasticsearch.packaging.util.FileUtils.escapePath; +import static org.elasticsearch.packaging.util.FileUtils.getTempDir; +import static org.hamcrest.CoreMatchers.containsString; +import static org.junit.Assume.assumeTrue; + +public class CertGenCliTests extends PackagingTestCase { + private static final Path instancesFile = getTempDir().resolve("instances.yml"); + private static final Path certificatesFile = getTempDir().resolve("certificates.zip"); + + @Before + public void filterDistros() { + assumeTrue("only default distro", distribution.flavor == Distribution.Flavor.DEFAULT); + assumeTrue("no docker", distribution.packaging != Distribution.Packaging.DOCKER); + } + + @BeforeClass + public static void cleanupFiles() { + FileUtils.rm(instancesFile, certificatesFile); + } + + public void test10Install() throws Exception { + install(); + } + + public void test20Help() throws Exception { + Shell.Result result = installation.executables().certgenTool.run("--help"); + assertThat(result.stdout, containsString("Simplifies certificate creation")); + } + + public void test30Generate() throws Exception { + Files.write(instancesFile, Arrays.asList( + "instances:", + " - name: \"mynode\"", + " ip:", + " - \"127.0.0.1\"")); + + installation.executables().certgenTool.run("--in " + instancesFile + " --out " + certificatesFile); + + String owner = installation.getOwner(); + assertThat(certificatesFile, file(File, owner, owner, p600)); + } + + public void test31ExtractCerts() throws 
Exception { + // windows 2012 r2 has powershell 4.0, which lacks Expand-Archive + assumeFalse(Platforms.OS_NAME.equals("Windows Server 2012 R2")); + + Path certsDir = installation.config("certs"); + sh.extractZip(certificatesFile, certsDir); + + Path caDir = certsDir.resolve("ca"); + assertThat(caDir.resolve("ca.key"), file(File, null, null, null)); + assertThat(caDir.resolve("ca.crt"), file(File, null, null, null)); + + Path nodeDir = certsDir.resolve("mynode"); + assertThat(nodeDir.resolve("mynode.key"), file(File, null, null, null)); + assertThat(nodeDir.resolve("mynode.crt"), file(File, null, null, null)); + + FileUtils.cp(certsDir, installation.config("certs")); + } + + public void test40RunWithCert() throws Exception { + // windows 2012 r2 has powershell 4.0, which lacks Expand-Archive + assumeFalse(Platforms.OS_NAME.equals("Windows Server 2012 R2")); + + append(installation.config("elasticsearch.yml"), String.join("\n", + "node.name: mynode", + "xpack.security.transport.ssl.key: " + escapePath(installation.config("certs/mynode/mynode.key")), + "xpack.security.transport.ssl.certificate: " + escapePath(installation.config("certs/mynode/mynode.crt")), + "xpack.security.transport.ssl.certificate_authorities: [\"" + escapePath(installation.config("certs/ca/ca.crt")) + "\"]", + "xpack.security.http.ssl.key: " + escapePath(installation.config("certs/mynode/mynode.key")), + "xpack.security.http.ssl.certificate: "+ escapePath(installation.config("certs/mynode/mynode.crt")), + "xpack.security.http.ssl.certificate_authorities: [\"" + escapePath(installation.config("certs/ca/ca.crt")) + "\"]", + "xpack.security.transport.ssl.enabled: true", + "xpack.security.http.ssl.enabled: true")); + + assertWhileRunning(() -> { + ServerUtils.makeRequest(Request.Get("https://127.0.0.1:9200"), null, null, installation.config("certs/ca/ca.crt")); + }); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java new file mode 100644 index 0000000000000..4ae95713af9fe --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CronEvalCliTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.Shell; +import org.junit.Before; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.junit.Assume.assumeTrue; + +public class CronEvalCliTests extends PackagingTestCase { + + @Before + public void filterDistros() { + assumeTrue("only default distro", distribution.flavor == Distribution.Flavor.DEFAULT); + assumeTrue("no docker", distribution.packaging != Distribution.Packaging.DOCKER); + } + + public void test10Install() throws Exception { + install(); + } + + public void test20Help() throws Exception { + Shell.Result result = installation.executables().cronevalTool.run("--help"); + assertThat(result.stdout, containsString("Validates and evaluates a cron expression")); + } + + public void test30Run() throws Exception { + Shell.Result result = installation.executables().cronevalTool.run("'0 0 20 ? * MON-THU' -c 2"); + assertThat(result.stdout, containsString("Valid!")); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index a9fa84962490f..42e4262e4b947 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -59,6 +59,7 @@ import static org.elasticsearch.packaging.util.Docker.waitForPathToExist; import static org.elasticsearch.packaging.util.FileMatcher.p600; import static org.elasticsearch.packaging.util.FileMatcher.p660; +import static org.elasticsearch.packaging.util.FileMatcher.p775; import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.FileUtils.getTempDir; import static org.elasticsearch.packaging.util.FileUtils.rm; @@ -73,6 +74,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; public class DockerTests extends PackagingTestCase { @@ -334,10 +336,51 @@ public void test081ConfigurePasswordThroughEnvironmentVariableFile() throws Exce assertThat("Expected server to require authentication", statusCode, equalTo(401)); } + /** + * Check that when verifying the file permissions of _FILE environment variables, symlinks + * are followed. + */ + public void test082SymlinksAreFollowedWithEnvironmentVariableFiles() throws Exception { + // Test relies on configuring security + assumeTrue(distribution.isDefault()); + // Test relies on symlinks + assumeFalse(Platforms.WINDOWS); + + final String xpackPassword = "hunter2"; + final String passwordFilename = "password.txt"; + final String symlinkFilename = "password_symlink"; + + // ELASTIC_PASSWORD_FILE + Files.writeString(tempDir.resolve(passwordFilename), xpackPassword + "\n"); + + // Link to the password file. We can't use an absolute path for the target, because + // it won't resolve inside the container. + Files.createSymbolicLink(tempDir.resolve(symlinkFilename), Path.of(passwordFilename)); + + Map envVars = Map.of( + "ELASTIC_PASSWORD_FILE", + "/run/secrets/" + symlinkFilename, + // Enable security so that we can test that the password has been used + "xpack.security.enabled", + "true" + ); + + // File permissions need to be secured in order for the ES wrapper to accept + // them for populating env var values. 
The wrapper will resolve the symlink + // and check the target's permissions. + Files.setPosixFilePermissions(tempDir.resolve(passwordFilename), p600); + + final Map volumes = Map.of(tempDir, Path.of("/run/secrets")); + + // Restart the container - this will check that Elasticsearch started correctly, + // and didn't fail to follow the symlink and check the file permissions + runContainer(distribution(), volumes, envVars); + } + /** * Check that environment variables cannot be used with _FILE environment variables. */ - public void test081CannotUseEnvVarsAndFiles() throws Exception { + public void test083CannotUseEnvVarsAndFiles() throws Exception { final String optionsFilename = "esJavaOpts.txt"; // ES_JAVA_OPTS_FILE @@ -368,7 +411,7 @@ public void test081CannotUseEnvVarsAndFiles() throws Exception { * Check that when populating environment variables by setting variables with the suffix "_FILE", * the files' permissions are checked. */ - public void test082EnvironmentVariablesUsingFilesHaveCorrectPermissions() throws Exception { + public void test084EnvironmentVariablesUsingFilesHaveCorrectPermissions() throws Exception { final String optionsFilename = "esJavaOpts.txt"; // ES_JAVA_OPTS_FILE @@ -390,11 +433,60 @@ public void test082EnvironmentVariablesUsingFilesHaveCorrectPermissions() throws ); } + /** + * Check that when verifying the file permissions of _FILE environment variables, symlinks + * are followed, and that invalid target permissions are detected. + */ + public void test085SymlinkToFileWithInvalidPermissionsIsRejected() throws Exception { + // Test relies on configuring security + assumeTrue(distribution.isDefault()); + // Test relies on symlinks + assumeFalse(Platforms.WINDOWS); + + final String xpackPassword = "hunter2"; + final String passwordFilename = "password.txt"; + final String symlinkFilename = "password_symlink"; + + // ELASTIC_PASSWORD_FILE + Files.writeString(tempDir.resolve(passwordFilename), xpackPassword + "\n"); + + // Link to the password file. We can't use an absolute path for the target, because + // it won't resolve inside the container. + Files.createSymbolicLink(tempDir.resolve(symlinkFilename), Path.of(passwordFilename)); + + Map envVars = Map.of( + "ELASTIC_PASSWORD_FILE", + "/run/secrets/" + symlinkFilename, + // Enable security so that we can test that the password has been used + "xpack.security.enabled", + "true" + ); + + // Set invalid permissions on the file that the symlink targets + Files.setPosixFilePermissions(tempDir.resolve(passwordFilename), p775); + + final Map volumes = Map.of(tempDir, Path.of("/run/secrets")); + + // Restart the container + final Result dockerLogs = runContainerExpectingFailure(distribution(), volumes, envVars); + + assertThat( + dockerLogs.stderr, + containsString( + "ERROR: File " + + passwordFilename + + " (target of symlink /run/secrets/" + + symlinkFilename + + " from ELASTIC_PASSWORD_FILE) must have file permissions 400 or 600, but actually has: 775" + ) + ); + } + /** * Check that environment variables are translated to -E options even for commands invoked under * `docker exec`, where the Docker image's entrypoint is not executed. */ - public void test83EnvironmentVariablesAreRespectedUnderDockerExec() { + public void test086EnvironmentVariablesAreRespectedUnderDockerExec() { // This test relies on a CLI tool attempting to connect to Elasticsearch, and the // tool in question is only in the default distribution. 
assumeTrue(distribution.isDefault()); @@ -405,10 +497,7 @@ public void test83EnvironmentVariablesAreRespectedUnderDockerExec() { final Result result = sh.runIgnoreExitCode("elasticsearch-setup-passwords auto"); assertFalse("elasticsearch-setup-passwords command should have failed", result.isSuccess()); - assertThat( - result.stdout, - containsString("java.net.UnknownHostException: this.is.not.valid: Name or service not known") - ); + assertThat(result.stdout, containsString("java.net.UnknownHostException: this.is.not.valid: Name or service not known")); } /** diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index da3f55daae035..c66bb3cb7f329 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -175,6 +175,12 @@ protected void assertWhileRunning(Platforms.PlatformAction assertions) throws Ex logger.warn("Elasticsearch log:\n" + FileUtils.slurpAllLogs(installation.logs, "elasticsearch.log", "*.log.gz")); } + if (Files.exists(installation.logs.resolve("output.out"))) { + logger.warn("Stdout:\n" + FileUtils.slurpTxtorGz(installation.logs.resolve("output.out"))); + } + if (Files.exists(installation.logs.resolve("output.err"))) { + logger.warn("Stderr:\n" + FileUtils.slurpTxtorGz(installation.logs.resolve("output.err"))); + } throw e; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java index b08b3bfe52987..edec71a12a063 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java @@ -62,7 +62,8 @@ public void test20GeneratePasswords() throws Exception { Shell.Result result = installation.executables().setupPasswordsTool.run("auto --batch", null); Map userpasses = parseUsersAndPasswords(result.stdout); for (Map.Entry userpass : userpasses.entrySet()) { - String response = ServerUtils.makeRequest(Request.Get("http://localhost:9200"), userpass.getKey(), userpass.getValue()); + String response = ServerUtils.makeRequest( + Request.Get("http://localhost:9200"), userpass.getKey(), userpass.getValue(), null); assertThat(response, containsString("You Know, for Search")); } }); @@ -111,7 +112,7 @@ public void test30AddBootstrapPassword() throws Exception { assertWhileRunning(() -> { String response = ServerUtils.makeRequest( Request.Get("http://localhost:9200/_cluster/health?wait_for_status=green&timeout=180s"), - "elastic", BOOTSTRAP_PASSWORD); + "elastic", BOOTSTRAP_PASSWORD, null); assertThat(response, containsString("\"status\":\"green\"")); }); } @@ -123,7 +124,8 @@ public void test40GeneratePasswordsBootstrapAlreadySet() throws Exception { Map userpasses = parseUsersAndPasswords(result.stdout); assertThat(userpasses, hasKey("elastic")); for (Map.Entry userpass : userpasses.entrySet()) { - String response = ServerUtils.makeRequest(Request.Get("http://localhost:9200"), userpass.getKey(), userpass.getValue()); + String response = ServerUtils.makeRequest( + Request.Get("http://localhost:9200"), userpass.getKey(), userpass.getValue(), null); assertThat(response, containsString("You Know, for Search")); } }); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java 
b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java index 5f4fac5f5352c..d261a2627a622 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java @@ -160,7 +160,7 @@ public void test21CustomizeServiceDisplayName() { } // NOTE: service description is not attainable through any powershell api, so checking it is not possible... - public void assertStartedAndStop() throws IOException { + public void assertStartedAndStop() throws Exception { ServerUtils.waitForElasticsearch(installation); ServerUtils.runElasticsearchTests(); @@ -191,7 +191,7 @@ public void assertStartedAndStop() throws IOException { "}"); } - public void test30StartStop() throws IOException { + public void test30StartStop() throws Exception { sh.run(serviceScript + " install"); assertCommand(serviceScript + " start"); assertStartedAndStop(); @@ -209,7 +209,7 @@ public void test32StopNotStarted() throws IOException { assertThat(result.stdout, containsString("The service '" + DEFAULT_ID + "' has been stopped")); } - public void test33JavaChanged() throws IOException { + public void test33JavaChanged() throws Exception { final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); try { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Cleanup.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Cleanup.java index f9b98d58ccacc..77384a38d2737 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Cleanup.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Cleanup.java @@ -20,10 +20,12 @@ package org.elasticsearch.packaging.util; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.function.Consumer; import static org.elasticsearch.packaging.util.FileUtils.getTempDir; import static org.elasticsearch.packaging.util.FileUtils.lsGlob; @@ -79,13 +81,13 @@ public static void cleanEverything() throws Exception { // delete files that may still exist lsGlob(getTempDir(), "elasticsearch*").forEach(FileUtils::rm); - final List filesToDelete = Platforms.WINDOWS - ? ELASTICSEARCH_FILES_WINDOWS - : ELASTICSEARCH_FILES_LINUX; + final List filesToDelete = Platforms.WINDOWS ? ELASTICSEARCH_FILES_WINDOWS : ELASTICSEARCH_FILES_LINUX; + // windows needs leniency due to asinine releasing of file locking async from a process exiting + Consumer rm = Platforms.WINDOWS ? 
FileUtils::rmWithRetries : FileUtils::rm; filesToDelete.stream() .map(Paths::get) .filter(Files::exists) - .forEach(FileUtils::rm); + .forEach(rm); // disable elasticsearch service // todo add this for windows when adding tests for service intallation diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java index ef3382cd86eff..8cbe7a7c6a987 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java @@ -26,7 +26,6 @@ import org.apache.http.client.fluent.Request; import org.elasticsearch.common.CheckedRunnable; -import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.PosixFileAttributes; @@ -503,7 +502,7 @@ private static void withLogging(CheckedRunnable r) thro } } - public static JsonNode getJson(String path) throws IOException { + public static JsonNode getJson(String path) throws Exception { final String pluginsResponse = makeRequest(Request.Get("http://localhost:9200/" + path)); ObjectMapper mapper = new ObjectMapper(); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java index 57c74a8c68e01..90edb42829db1 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java @@ -62,7 +62,7 @@ public enum Fileness { File, Directory } public FileMatcher(Fileness fileness, String owner, String group, Set posixPermissions) { this.fileness = Objects.requireNonNull(fileness); - this.owner = Objects.requireNonNull(owner); + this.owner = owner; this.group = group; this.posixPermissions = posixPermissions; } @@ -76,16 +76,18 @@ protected boolean matchesSafely(Path path) { if (Platforms.WINDOWS) { final BasicFileAttributes attributes = getBasicFileAttributes(path); - final String attributeViewOwner = getFileOwner(path); if (fileness.equals(Fileness.Directory) != attributes.isDirectory()) { mismatch = "Is " + (attributes.isDirectory() ? "a directory" : "a file"); return false; } - if (attributeViewOwner.contains(owner) == false) { - mismatch = "Owned by " + attributeViewOwner; - return false; + if (owner != null) { + final String attributeViewOwner = getFileOwner(path); + if (attributeViewOwner.contains(owner) == false) { + mismatch = "Owned by " + attributeViewOwner; + return false; + } } } else { final PosixFileAttributes attributes = getPosixFileAttributes(path); @@ -95,7 +97,7 @@ protected boolean matchesSafely(Path path) { return false; } - if (owner.equals(attributes.owner().getName()) == false) { + if (owner != null && owner.equals(attributes.owner().getName()) == false) { mismatch = "Owned by " + attributes.owner().getName(); return false; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java index efd2c85bf9e3a..eb57e66239eec 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java @@ -84,6 +84,30 @@ public static void rm(Path... paths) { } } + public static void rmWithRetries(Path... 
paths) { + int tries = 10; + Exception exception = null; + while (tries-- > 0) { + try { + IOUtils.rm(paths); + return; + } catch (IOException e) { + if (exception == null) { + exception = e; + } else { + exception.addSuppressed(e); + } + } + try { + Thread.sleep(1000); + } catch (InterruptedException interrupted) { + Thread.currentThread().interrupt(); + return; + } + } + throw new RuntimeException(exception); + } + public static Path mktempDir(Path path) { try { return Files.createTempDirectory(path,"tmp"); @@ -262,7 +286,7 @@ public static Map getNumericUnixPathOwnership(Path path) { // vagrant creates /tmp for us in windows so we use that to avoid long paths public static Path getTempDir() { - return Paths.get("/tmp"); + return Paths.get("/tmp").toAbsolutePath(); } public static Path getDefaultArchiveInstallPath() { @@ -315,4 +339,15 @@ public static void deleteIfExists(Path path) { } } } + + /** + * Return the given path a string suitable for using on the host system. + */ + public static String escapePath(Path path) { + if (Platforms.WINDOWS) { + // replace single backslash with forward slash, to avoid unintended escapes in scripts + return path.toString().replace('\\', '/'); + } + return path.toString(); + } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java index a237da3ea1962..e879fe24c8be2 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java @@ -114,6 +114,19 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { ); } + /** + * Returns the user that owns this installation. + * + * For packages this is root, and for archives it is the user doing the installation. + */ + public String getOwner() { + if (Platforms.WINDOWS) { + // windows is always administrator, since there is no sudo + return "BUILTIN\\Administrators"; + } + return distribution.isArchive() ? 
ARCHIVE_OWNER : "root"; + } + public Path bin(String executableName) { return bin.resolve(executableName); } @@ -147,7 +160,7 @@ public Shell.Result run(String args) { public Shell.Result run(String args, String input) { String command = path + " " + args; - if (distribution.isArchive() && distribution.platform != Distribution.Platform.WINDOWS) { + if (distribution.isArchive() && Platforms.WINDOWS == false) { command = "sudo -E -u " + ARCHIVE_OWNER + " " + command; } if (input != null) { @@ -163,6 +176,8 @@ public class Executables { public final Executable pluginTool = new Executable("elasticsearch-plugin"); public final Executable keystoreTool = new Executable("elasticsearch-keystore"); public final Executable certutilTool = new Executable("elasticsearch-certutil"); + public final Executable certgenTool = new Executable("elasticsearch-certgen"); + public final Executable cronevalTool = new Executable("elasticsearch-croneval"); public final Executable shardTool = new Executable("elasticsearch-shard"); public final Executable nodeTool = new Executable("elasticsearch-node"); public final Executable setupPasswordsTool = new Executable("elasticsearch-setup-passwords"); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 036358696d21d..26817a60889fe 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -305,7 +305,7 @@ public static void clearJournal(Shell sh) { } } - public static void assertElasticsearchStarted(Shell sh, Installation installation) throws IOException { + public static void assertElasticsearchStarted(Shell sh, Installation installation) throws Exception { waitForElasticsearch(installation); if (isSystemd()) { @@ -324,7 +324,7 @@ public static void stopElasticsearch(Shell sh) { } } - public static void restartElasticsearch(Shell sh, Installation installation) throws IOException { + public static void restartElasticsearch(Shell sh, Installation installation) throws Exception { if (isSystemd()) { sh.run("systemctl restart elasticsearch.service"); } else { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java index 40aee0a014175..6ba33b56b30d4 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java @@ -23,17 +23,32 @@ import org.apache.http.HttpResponse; import org.apache.http.client.fluent.Executor; import org.apache.http.client.fluent.Request; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.socket.LayeredConnectionSocketFactory; +import org.apache.http.conn.socket.PlainConnectionSocketFactory; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.entity.ContentType; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; import java.io.IOException; +import java.io.InputStream; import java.net.InetAddress; import 
java.net.Socket; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.KeyStore; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -45,24 +60,25 @@ public class ServerUtils { private static final Logger logger = LogManager.getLogger(ServerUtils.class); private static String SECURITY_ENABLED = "xpack.security.enabled: true"; + private static String SSL_ENABLED = "xpack.security.http.ssl.enabled: true"; // generous timeout as nested virtualization can be quite slow ... private static final long waitTime = TimeUnit.MINUTES.toMillis(3); private static final long timeoutLength = TimeUnit.SECONDS.toMillis(30); private static final long requestInterval = TimeUnit.SECONDS.toMillis(5); - public static void waitForElasticsearch(Installation installation) throws IOException { - boolean securityEnabled = false; + public static void waitForElasticsearch(Installation installation) throws Exception { + boolean xpackEnabled = false; // TODO: need a way to check if docker has security enabled, the yml config is not bind mounted so can't look from here if (installation.distribution.packaging != Distribution.Packaging.DOCKER) { Path configFilePath = installation.config("elasticsearch.yml"); // this is fragile, but currently doesn't deviate from a single line enablement and not worth the parsing effort String configFile = Files.readString(configFilePath, StandardCharsets.UTF_8); - securityEnabled = configFile.contains(SECURITY_ENABLED); + xpackEnabled = configFile.contains(SECURITY_ENABLED) || configFile.contains(SSL_ENABLED); } - if (securityEnabled) { + if (xpackEnabled) { // with security enabled, we may or may not have setup a user/pass, so we use a more generic port being available check. 
// this isn't as good as a health check, but long term all this waiting should go away when node startup does not // make the http port available until the system is really ready to serve requests @@ -78,17 +94,46 @@ public static void waitForElasticsearch(Installation installation) throws IOExce * @param request the request to execute * @param username the username to supply, or null * @param password the password to supply, or null + * @param caCert path to the ca certificate the server side ssl cert was generated from, or no if not using ssl * @return the response from the server * @throws IOException if an error occurs */ - private static HttpResponse execute(Request request, String username, String password) throws IOException { - final Executor executor = Executor.newInstance(); + private static HttpResponse execute(Request request, String username, String password, Path caCert) throws Exception { + final Executor executor; + if (caCert != null) { + try (InputStream inStream = Files.newInputStream(caCert)) { + CertificateFactory cf = CertificateFactory.getInstance("X.509"); + X509Certificate cert = (X509Certificate) cf.generateCertificate(inStream); + KeyStore truststore = KeyStore.getInstance(KeyStore.getDefaultType()); + truststore.load(null, null); + truststore.setCertificateEntry("myClusterCA", cert); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(truststore, null); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(truststore); + SSLContext context = SSLContext.getInstance("TLSv1.2"); + context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); + final LayeredConnectionSocketFactory ssl = new SSLConnectionSocketFactory(context); + final Registry sfr = RegistryBuilder.create() + .register("http", PlainConnectionSocketFactory.getSocketFactory()) + .register("https", ssl) + .build(); + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(sfr); + connectionManager.setDefaultMaxPerRoute(100); + connectionManager.setMaxTotal(200); + connectionManager.setValidateAfterInactivity(1000); + executor = Executor.newInstance(HttpClientBuilder.create() + .setConnectionManager(connectionManager) + .build()); + } + } else { + executor = Executor.newInstance(); + } if (username != null && password != null) { executor.auth(username, password); executor.authPreemptive(new HttpHost("localhost", 9200)); } - return executor.execute(request).returnResponse(); } @@ -118,7 +163,7 @@ public static void waitForElasticsearch( Installation installation, String username, String password - ) throws IOException { + ) throws Exception { Objects.requireNonNull(status); @@ -129,6 +174,11 @@ public static void waitForElasticsearch( boolean started = false; Throwable thrownException = null; + Path caCert = installation.config("certs/ca/ca.crt"); + if (Files.exists(caCert) == false) { + caCert = null; // no cert, so don't use ssl + } + while (started == false && timeElapsed < waitTime) { if (System.currentTimeMillis() - lastRequest > requestInterval) { try { @@ -139,7 +189,8 @@ public static void waitForElasticsearch( .connectTimeout((int) timeoutLength) .socketTimeout((int) timeoutLength), username, - password + password, + caCert ); if (response.getStatusLine().getStatusCode() >= 300) { @@ -179,11 +230,11 @@ public static void waitForElasticsearch( url = "http://localhost:9200/_cluster/health/" + index + "?wait_for_status=" + status + 
"&timeout=60s&pretty"; } - final String body = makeRequest(Request.Get(url), username, password); + final String body = makeRequest(Request.Get(url), username, password, caCert); assertThat("cluster health response must contain desired status", body, containsString(status)); } - public static void runElasticsearchTests() throws IOException { + public static void runElasticsearchTests() throws Exception { makeRequest( Request.Post("http://localhost:9200/library/_doc/1?refresh=true&pretty") .bodyString("{ \"title\": \"Book #1\", \"pages\": 123 }", ContentType.APPLICATION_JSON)); @@ -198,12 +249,12 @@ public static void runElasticsearchTests() throws IOException { makeRequest(Request.Delete("http://localhost:9200/_all")); } - public static String makeRequest(Request request) throws IOException { - return makeRequest(request, null, null); + public static String makeRequest(Request request) throws Exception { + return makeRequest(request, null, null, null); } - public static String makeRequest(Request request, String username, String password) throws IOException { - final HttpResponse response = execute(request, username, password); + public static String makeRequest(Request request, String username, String password, Path caCert) throws Exception { + final HttpResponse response = execute(request, username, password, caCert); final String body = EntityUtils.toString(response.getEntity()); if (response.getStatusLine().getStatusCode() >= 300) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java index e0fae3d095578..95141aae17359 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java @@ -88,9 +88,13 @@ public void chown(Path path) throws Exception { Platforms.onLinux(() -> run("chown -R elasticsearch:elasticsearch " + path)); Platforms.onWindows(() -> run( "$account = New-Object System.Security.Principal.NTAccount '" + System.getenv("username") + "'; " + - "$tempConf = Get-ChildItem '" + path + "' -Recurse; " + - "$tempConf += Get-Item '" + path + "'; " + - "$tempConf | ForEach-Object { " + + "$pathInfo = Get-Item '" + path + "'; " + + "$toChown = @(); " + + "if ($pathInfo.PSIsContainer) { " + + " $toChown += Get-ChildItem '" + path + "' -Recurse; " + + "}" + + "$toChown += $pathInfo; " + + "$toChown | ForEach-Object { " + "$acl = Get-Acl $_.FullName; " + "$acl.SetOwner($account); " + "Set-Acl $_.FullName $acl " + @@ -98,7 +102,12 @@ public void chown(Path path) throws Exception { )); } - public Result run( String command, Object... args) { + public void extractZip(Path zipPath, Path destinationDir) throws Exception { + Platforms.onLinux(() -> run("unzip \"" + zipPath + "\" -d \"" + destinationDir + "\"")); + Platforms.onWindows(() -> run("Expand-Archive -Path \"" + zipPath + "\" -DestinationPath \"" + destinationDir + "\"")); + } + + public Result run(String command, Object... 
args) { String formattedCommand = String.format(Locale.ROOT, command, args); return run(formattedCommand); } diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index ec2a70c6f84cd..57c38e5cff267 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.upgrades; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; @@ -30,6 +31,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.settings.Settings; @@ -37,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; @@ -60,8 +63,6 @@ *
<li>Run against the old version cluster from the first step: {@link TestStep#STEP3_OLD_CLUSTER}</li>
 *     <li>Run against the current version cluster from the second step: {@link TestStep#STEP4_NEW_CLUSTER}</li>
 * </ul>
  • * - * TODO: Add two more steps: delete all old version snapshots from the repository, then downgrade again and verify that the repository - * is not being corrupted. This requires first merging the logic for reading the min_version field in RepositoryData back to 7.6. */ public class MultiVersionRepositoryAccessIT extends ESRestTestCase { @@ -98,7 +99,7 @@ public static TestStep parse(String value) { } } - protected static final TestStep TEST_STEP = TestStep.parse(System.getProperty("tests.rest.suite")); + private static final TestStep TEST_STEP = TestStep.parse(System.getProperty("tests.rest.suite")); @Override protected boolean preserveSnapshotsUponCompletion() { @@ -192,31 +193,46 @@ public void testReadOnlyRepo() throws IOException { } public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { - if (TEST_STEP.ordinal() > 1) { - // Only testing the first 2 steps here - return; - } final String repoName = getTestName(); try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0])))) { final int shards = 3; createIndex(client, "test-index", shards); createRepository(client, repoName, false); - createSnapshot(client, repoName, "snapshot-" + TEST_STEP); - final List> snapshots = listSnapshots(repoName); - // Every step creates one snapshot - assertThat(snapshots, hasSize(TEST_STEP.ordinal() + 1)); - assertSnapshotStatusSuccessful(client, repoName, - snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new)); - if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) { - ensureSnapshotRestoreWorks(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER, shards); + // only create some snapshots in the first two steps + if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER || TEST_STEP == TestStep.STEP2_NEW_CLUSTER) { + createSnapshot(client, repoName, "snapshot-" + TEST_STEP); + final List> snapshots = listSnapshots(repoName); + // Every step creates one snapshot + assertThat(snapshots, hasSize(TEST_STEP.ordinal() + 1)); + assertSnapshotStatusSuccessful(client, repoName, + snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new)); + if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) { + ensureSnapshotRestoreWorks(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER, shards); + } else { + deleteSnapshot(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER); + ensureSnapshotRestoreWorks(client, repoName, "snapshot-" + TestStep.STEP2_NEW_CLUSTER, shards); + createSnapshot(client, repoName, "snapshot-1"); + ensureSnapshotRestoreWorks(client, repoName, "snapshot-1", shards); + deleteSnapshot(client, repoName, "snapshot-" + TestStep.STEP2_NEW_CLUSTER); + createSnapshot(client, repoName, "snapshot-2"); + ensureSnapshotRestoreWorks(client, repoName, "snapshot-2", shards); + } } else { - deleteSnapshot(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER); - ensureSnapshotRestoreWorks(client, repoName, "snapshot-" + TestStep.STEP2_NEW_CLUSTER, shards); - createSnapshot(client, repoName, "snapshot-1"); - ensureSnapshotRestoreWorks(client, repoName, "snapshot-1", shards); - deleteSnapshot(client, repoName, "snapshot-" + TestStep.STEP2_NEW_CLUSTER); - createSnapshot(client, repoName, "snapshot-2"); - ensureSnapshotRestoreWorks(client, repoName, "snapshot-2", shards); + if (minimumNodeVersion().before(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { + assertThat(TEST_STEP, is(TestStep.STEP3_OLD_CLUSTER)); + final List> expectedExceptions = + List.of(ResponseException.class, 
ElasticsearchStatusException.class); + expectThrowsAnyOf(expectedExceptions, () -> listSnapshots(repoName)); + expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-1")); + expectThrowsAnyOf(expectedExceptions, () -> deleteSnapshot(client, repoName, "snapshot-2")); + expectThrowsAnyOf(expectedExceptions, () -> createSnapshot(client, repoName, "snapshot-impossible")); + } else { + assertThat(listSnapshots(repoName), hasSize(2)); + if (TEST_STEP == TestStep.STEP4_NEW_CLUSTER) { + ensureSnapshotRestoreWorks(client, repoName, "snapshot-1", shards); + ensureSnapshotRestoreWorks(client, repoName, "snapshot-2", shards); + } + } } } finally { deleteRepository(repoName); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 88dabadbd09fd..d0892fd914f57 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -44,7 +44,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -283,7 +282,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { } public void testRecovery() throws Exception { - final String index = "recover_with_soft_deletes"; + final String index = "test_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -315,6 +314,9 @@ public void testRecovery() throws Exception { } } } + if (randomBoolean()) { + syncedFlush(index); + } ensureGreen(index); } @@ -557,40 +559,6 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab } } - private void syncedFlush(String index) throws Exception { - // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. - // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. - assertBusy(() -> { - try { - Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); - Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("failed"), equalTo(0)); - } catch (ResponseException ex) { - throw new AssertionError(ex); // cause assert busy to retry - } - }); - // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId - ensureGlobalCheckpointSynced(index); - } - - @SuppressWarnings("unchecked") - private void ensureGlobalCheckpointSynced(String index) throws Exception { - assertBusy(() -> { - Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); - List> shardStats = (List>) XContentMapValues.extractValue("indices." 
+ index + ".shards.0", stats); - shardStats.stream() - .map(shard -> (Map) XContentMapValues.extractValue("seq_no", shard)) - .filter(Objects::nonNull) - .forEach(seqNoStat -> { - long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); - long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); - long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); - assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); - assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); - }); - }, 60, TimeUnit.SECONDS); - } - /** Ensure that we can always execute update requests regardless of the version of cluster */ public void testUpdateDoc() throws Exception { final String index = "test_update_doc"; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json deleted file mode 100644 index e7c98d66451bd..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "indices.flush_synced":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html", - "description":"Performs a synced flush operation on one or more indices." - }, - "stability":"stable", - "url":{ - "paths":[ - { - "path":"/_flush/synced", - "methods":[ - "POST", - "GET" - ] - }, - { - "path":"/{index}/_flush/synced", - "methods":[ - "POST", - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names; use `_all` or empty string for all indices" - } - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "none", - "all" - ], - "default":"open", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 6632d912cd57e..de5e632975752 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -88,54 +88,6 @@ - match: $body: | /^$/ - - - do: - indices.create: - index: sync_id_test - body: - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - indices.flush_synced: - index: sync_id_test - - - is_false: _shards.failed - - - do: - cat.shards: - index: sync_id_test - h: index,state,sync_id -# 20 chars for sync ids with 5.x which uses time-based uuids and 22 with 6.x which uses random uuids - - match: - $body: | - /^(sync_id_test\s+STARTED\s+[A-Za-z0-9_\-]{20,22}\n){5}$/ - - - do: - indices.delete: - index: sync_id_test - - - do: - indices.create: - index: sync_id_no_flush_test - body: - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cat.shards: - index: sync_id_no_flush_test - h: index,state,sync_id - - match: - $body: | - /^(sync_id_no_flush_test\s+STARTED\s+\n){5}$/ - - - do: - indices.delete: - index: sync_id_no_flush_test - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index ff99d20e9b761..89b8236225c0a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -1,27 +1,3 @@ ---- -"Index synced flush rest test": - - do: - indices.create: - index: testing - body: - settings: - index: - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - do: - indices.flush_synced: - index: testing - - - is_false: _shards.failed - - - do: - indices.stats: {level: shards} - - - is_true: indices.testing.shards.0.0.commit.user_data.sync_id - --- "Flush stats": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 1e9a2ac8df4c0..adc61f1204fa9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -243,7 +243,7 @@ setup: --- "Composite aggregation with format": - skip: - version: " - 7.99.99" # after BWC merged revert to 7.1.99 + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 features: warnings @@ -309,7 +309,7 @@ setup: --- "Composite aggregation with format and calendar_interval": - skip: - version: " - 7.99.99" # after BWC merged revert to 7.1.99 + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - do: @@ -370,8 +370,8 @@ setup: --- "Composite aggregation with date_histogram offset": - skip: - version: " - 7.99.99" # after BWC merged revert to 7.5.99 - reason: offset introduced in 8.0.0 + version: " - 7.5.99" + reason: offset introduced in 7.6.0 - do: search: @@ -694,6 +694,7 @@ setup: - match: { aggregations.test.buckets.3.key.geo: "12/2048/0" } - match: { aggregations.test.buckets.3.key.kw: "bar" } - match: { aggregations.test.buckets.3.doc_count: 1 } + --- "Simple Composite aggregation with geotile grid add aggregate after": - skip: @@ -735,3 +736,49 
@@ setup: - match: { aggregations.test.buckets.2.key.geo: "12/2048/0" } - match: { aggregations.test.buckets.2.key.kw: "bar" } - match: { aggregations.test.buckets.2.doc_count: 1 } + +--- +"Mixed ip and unmapped fields": + - skip: + version: " - 7.99.99" + reason: This will fail against 7.x until the fix is backported there + # It is important that the index *without* the ip field be sorted *before* + # the index *with* the ip field because that has caused bugs in the past. + - do: + indices.create: + index: test_1 + - do: + indices.create: + index: test_2 + body: + mappings: + properties: + f: + type: ip + - do: + index: + index: test_2 + id: 1 + body: { "f": "192.168.0.1" } + refresh: true + + - do: + search: + index: test_* + body: + aggregations: + test: + composite: + sources: [ + "f": { + "terms": { + "field": "f" + } + } + ] + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + - length: { aggregations.test.buckets: 1 } + - match: { aggregations.test.buckets.0.key.f: "192.168.0.1" } + - match: { aggregations.test.buckets.0.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_error.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_error.yml new file mode 100644 index 0000000000000..9a48d24783b44 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_error.yml @@ -0,0 +1,13 @@ +--- +'Misspelled fields get "did you mean"': + - skip: + version: " - 7.5.99" + reason: Implemented in 7.6 + - do: + catch: /\[UpdateRequest\] unknown field \[dac\] did you mean \[doc\]\?/ + update: + index: test + id: 1 + body: + dac: { foo: baz } + upsert: { foo: bar } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index ce8d0053e76dc..41e108638ad39 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -73,6 +73,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_5_1 = new Version(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final Version V_7_5_2 = new Version(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final Version V_7_6_0 = new Version(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_7_0 = new Version(7070099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final Version CURRENT = V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 3d60a1fb698d2..1ef3c1418adba 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -107,10 +107,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.flush.FlushAction; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; -import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import 
org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; @@ -492,7 +490,6 @@ public void reg actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class); actions.register(FlushAction.INSTANCE, TransportFlushAction.class); - actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class); actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java deleted file mode 100644 index cb3333354b8e1..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.broadcast.BroadcastRequest; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.Arrays; - -/** - * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush - * and writes the same sync id to primary and all copies. - * - *

<p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}.</p>
    - * - * @see org.elasticsearch.client.Requests#flushRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - * @see SyncedFlushResponse - */ -public class SyncedFlushRequest extends BroadcastRequest { - - /** - * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will - * be sync flushed. - */ - public SyncedFlushRequest(String... indices) { - super(indices); - } - - public SyncedFlushRequest(StreamInput in) throws IOException { - super(in); - } - - @Override - public String toString() { - return "SyncedFlushRequest{" + - "indices=" + Arrays.toString(indices) + "}"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java deleted file mode 100644 index 5e286b184fecc..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * The result of performing a sync flush operation on all shards of multiple indices - */ -public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment { - - private final Map> shardsResultPerIndex; - private final ShardCounts shardCounts; - - public SyncedFlushResponse(Map> shardsResultPerIndex) { - // shardsResultPerIndex is never modified after it is passed to this - // constructor so this is safe even though shardsResultPerIndex is a - // ConcurrentHashMap - this.shardsResultPerIndex = unmodifiableMap(shardsResultPerIndex); - this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); - } - - public SyncedFlushResponse(StreamInput in) throws IOException { - super(in); - shardCounts = new ShardCounts(in); - Map> tmpShardsResultPerIndex = new HashMap<>(); - int numShardsResults = in.readInt(); - for (int i =0 ; i< numShardsResults; i++) { - String index = in.readString(); - List shardsSyncedFlushResults = new ArrayList<>(); - int numShards = in.readInt(); - for (int j =0; j< numShards; j++) { - shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in)); - } - tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); - } - shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); - } - - /** - * total number shards, including replicas, both assigned and unassigned - */ - public int totalShards() { - return shardCounts.total; - } - - /** - * total number of shards for which the operation failed - */ - public int failedShards() { - return shardCounts.failed; - } - - /** - * total number of shards which were successfully sync-flushed - */ - public int successfulShards() { - return shardCounts.successful; - } - - public RestStatus restStatus() { - return failedShards() == 0 ? 
RestStatus.OK : RestStatus.CONFLICT; - } - - public Map> getShardsResultPerIndex() { - return shardsResultPerIndex; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields._SHARDS); - shardCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry> indexEntry : shardsResultPerIndex.entrySet()) { - List indexResult = indexEntry.getValue(); - builder.startObject(indexEntry.getKey()); - ShardCounts indexShardCounts = calculateShardCounts(indexResult); - indexShardCounts.toXContent(builder, params); - if (indexShardCounts.failed > 0) { - builder.startArray(Fields.FAILURES); - for (ShardsSyncedFlushResult shardResults : indexResult) { - if (shardResults.failed()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardResults.failureReason()); - builder.endObject(); - continue; - } - Map failedShards = shardResults.failedShards(); - for (Map.Entry shardEntry : failedShards.entrySet()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardEntry.getValue().failureReason()); - builder.field(Fields.ROUTING, shardEntry.getKey()); - builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - return builder; - } - - static ShardCounts calculateShardCounts(Iterable results) { - int total = 0, successful = 0, failed = 0; - for (ShardsSyncedFlushResult result : results) { - total += result.totalShards(); - successful += result.successfulShards(); - if (result.failed()) { - // treat all shard copies as failed - failed += result.totalShards(); - } else { - // some shards may have failed during the sync phase - failed += result.failedShards().size(); - } - } - return new ShardCounts(total, successful, failed); - } - - static final class ShardCounts implements ToXContentFragment, Writeable { - - public final int total; - public final int successful; - public final int failed; - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - ShardCounts(StreamInput in) throws IOException { - total = in.readInt(); - successful = in.readInt(); - failed = in.readInt(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.TOTAL, total); - builder.field(Fields.SUCCESSFUL, successful); - builder.field(Fields.FAILED, failed); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeInt(total); - out.writeInt(successful); - out.writeInt(failed); - } - } - - static final class Fields { - static final String _SHARDS = "_shards"; - static final String TOTAL = "total"; - static final String SUCCESSFUL = "successful"; - static final String FAILED = "failed"; - static final String FAILURES = "failures"; - static final String SHARD = "shard"; - static final String ROUTING = "routing"; - static final String REASON = "reason"; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardCounts.writeTo(out); - out.writeInt(shardsResultPerIndex.size()); - for (Map.Entry> entry : shardsResultPerIndex.entrySet()) { - out.writeString(entry.getKey()); - out.writeInt(entry.getValue().size()); - for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { - shardsSyncedFlushResult.writeTo(out); - } - } - } -} diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 077657cc62dd4..397ce43747d53 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -28,10 +29,16 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -48,6 +55,8 @@ public TransportShardFlushAction(Settings settings, TransportService transportSe ActionFilters actionFilters) { super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, + ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, new PreSyncedFlushTransportHandler(indicesService)); } @Override @@ -71,4 +80,43 @@ protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request, Index logger.trace("{} flush request executed on replica", replica.shardId()); return new ReplicaResult(); } + + // TODO: Remove this transition in 9.0 + private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; + + private static class PreShardSyncedFlushRequest extends TransportRequest { + private final ShardId shardId; + + private PreShardSyncedFlushRequest(StreamInput in) throws IOException { + super(in); + assert in.getVersion().before(Version.V_8_0_0) : "received pre_sync request from a new node"; + this.shardId = new ShardId(in); + } + + @Override + public String toString() { + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert false : "must not send pre_sync request from a new node"; + throw new UnsupportedOperationException(""); + } + } + + private static final class PreSyncedFlushTransportHandler implements TransportRequestHandler { + private final IndicesService indicesService; + + PreSyncedFlushTransportHandler(IndicesService indicesService) { + this.indicesService = indicesService; + } + + @Override + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId.getIndex()).getShard(request.shardId.id()); + indexShard.flush(new FlushRequest().force(false).waitIfOngoing(true)); + throw 
new UnsupportedOperationException("Synced flush was removed and a normal flush was performed instead."); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java deleted file mode 100644 index 3eb72e0b02277..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -/** - * Synced flush Action. 
- */ -public class TransportSyncedFlushAction extends HandledTransportAction { - - SyncedFlushService syncedFlushService; - - @Inject - public TransportSyncedFlushAction(TransportService transportService, ActionFilters actionFilters, - SyncedFlushService syncedFlushService) { - super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new); - this.syncedFlushService = syncedFlushService; - } - - @Override - protected void doExecute(Task task, SyncedFlushRequest request, ActionListener listener) { - syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java index f385746365e9a..f7852e43148c0 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -26,9 +26,11 @@ /** * Base request builder for master node read operations that can be executed on the local node as well */ -public abstract class MasterNodeReadOperationRequestBuilder, Response extends ActionResponse, - RequestBuilder extends MasterNodeReadOperationRequestBuilder> - extends MasterNodeOperationRequestBuilder { +public abstract class MasterNodeReadOperationRequestBuilder< + Request extends MasterNodeReadRequest, + Response extends ActionResponse, + RequestBuilder extends MasterNodeReadOperationRequestBuilder + > extends MasterNodeOperationRequestBuilder { protected MasterNodeReadOperationRequestBuilder(ElasticsearchClient client, ActionType action, Request request) { super(client, action, request); diff --git a/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 40c4c1046577b..36b34a7b24c85 100644 --- a/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -42,9 +42,6 @@ import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -336,29 +333,6 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ FlushRequestBuilder prepareFlush(String... indices); - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - * - * @param request The sync flush request - * @return A result future - * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) - */ - ActionFuture syncedFlush(SyncedFlushRequest request); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). 
- * - * @param request The sync flush request - * @param listener A listener to be notified with a result - * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) - */ - void syncedFlush(SyncedFlushRequest request, ActionListener listener); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - */ - SyncedFlushRequestBuilder prepareSyncedFlush(String... indices); - /** * Explicitly force merge one or more indices into a the number of segments. * diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index 01d04c64ae1b1..bec0865bea8a6 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -47,7 +47,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -248,17 +247,6 @@ public static FlushRequest flushRequest(String... indices) { return new FlushRequest(indices); } - /** - * Creates a synced flush indices request. - * - * @param indices The indices to sync flush. Use {@code null} or {@code _all} to execute against all indices - * @return The synced flush request - * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - */ - public static SyncedFlushRequest syncedFlushRequest(String... indices) { - return new SyncedFlushRequest(indices); - } - /** * Creates a force merge request. * diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 5bb480d8c23c3..1ee480fb55edd 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -163,10 +163,6 @@ import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; @@ -1351,21 +1347,6 @@ public FlushRequestBuilder prepareFlush(String... 
indices) { return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture syncedFlush(SyncedFlushRequest request) { - return execute(SyncedFlushAction.INSTANCE, request); - } - - @Override - public void syncedFlush(SyncedFlushRequest request, ActionListener listener) { - execute(SyncedFlushAction.INSTANCE, request, listener); - } - - @Override - public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) { - return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices); - } - @Override public void getMappings(GetMappingsRequest request, ActionListener listener) { execute(GetMappingsAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 874b5eb00646c..f1051bca36c43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -30,10 +30,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.Mapping; +import java.util.concurrent.Semaphore; + /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members). @@ -44,19 +47,30 @@ public class MappingUpdatedAction { Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), Property.Dynamic, Property.NodeScope); + public static final Setting INDICES_MAX_IN_FLIGHT_UPDATES_SETTING = + Setting.intSetting("indices.mapping.max_in_flight_updates", 10, 1, 1000, + Property.Dynamic, Property.NodeScope); + private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; + private final AdjustableSemaphore semaphore; @Inject public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) { this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); + this.semaphore = new AdjustableSemaphore(INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.get(settings), true); clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_MAX_IN_FLIGHT_UPDATES_SETTING, this::setMaxInFlightUpdates); } private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) { this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout; } + private void setMaxInFlightUpdates(int maxInFlightUpdates) { + semaphore.setMaxPermits(maxInFlightUpdates); + } + public void setClient(Client client) { this.client = client.admin().indices(); } @@ -68,6 +82,32 @@ public void setClient(Client client) { * potentially waiting for a master node to be available. 
*/ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { + final RunOnce release = new RunOnce(() -> semaphore.release()); + try { + semaphore.acquire(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + listener.onFailure(e); + return; + } + boolean successFullySent = false; + try { + sendUpdateMapping(index, mappingUpdate, ActionListener.runBefore(listener, release::run)); + successFullySent = true; + } finally { + if (successFullySent == false) { + release.run(); + } + } + } + + // used by tests + int blockedThreads() { + return semaphore.getQueueLength(); + } + + // can be overridden by tests + protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListener listener) { client.preparePutMapping().setConcreteIndex(index).setSource(mappingUpdate.toString(), XContentType.JSON) .setMasterNodeTimeout(dynamicMappingUpdateTimeout).setTimeout(TimeValue.ZERO) .execute(new ActionListener<>() { @@ -82,4 +122,30 @@ public void onFailure(Exception e) { } }); } + + static class AdjustableSemaphore extends Semaphore { + + private final Object maxPermitsMutex = new Object(); + private int maxPermits; + + AdjustableSemaphore(int maxPermits, boolean fair) { + super(maxPermits, fair); + this.maxPermits = maxPermits; + } + + void setMaxPermits(int permits) { + synchronized (maxPermitsMutex) { + final int diff = Math.subtractExact(permits, maxPermits); + if (diff > 0) { + // add permits + release(diff); + } else if (diff < 0) { + // remove permits + reducePermits(Math.negateExact(diff)); + } + + maxPermits = permits; + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 6973f165e7d4b..4862e9e4c0862 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -25,6 +25,8 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import java.io.Closeable; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -432,15 +434,14 @@ public void invariant() { assert publishVotes.isEmpty() || electionWon(); } - public void close() { + public void close() throws IOException { persistedState.close(); } /** * Pluggable persistence layer for {@link CoordinationState}. 
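A minimal standalone sketch of the adjustable-semaphore idea introduced in MappingUpdatedAction above, using only JDK classes; the class and method names below are illustrative and not part of this patch. It shows how a Semaphore can be resized at runtime, which is what allows indices.mapping.max_in_flight_updates to be a dynamic setting.

import java.util.concurrent.Semaphore;

class ResizableSemaphore extends Semaphore {
    private final Object mutex = new Object();
    private int maxPermits;

    ResizableSemaphore(int maxPermits) {
        super(maxPermits, true);
        this.maxPermits = maxPermits;
    }

    // Grow by releasing extra permits; shrink by retiring permits as callers return them.
    void setMaxPermits(int permits) {
        synchronized (mutex) {
            int diff = permits - maxPermits;
            if (diff > 0) {
                release(diff);
            } else if (diff < 0) {
                reducePermits(-diff);
            }
            maxPermits = permits;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ResizableSemaphore throttle = new ResizableSemaphore(2);
        throttle.acquire();
        throttle.acquire();
        System.out.println(throttle.tryAcquire()); // false: both permits are in flight
        throttle.setMaxPermits(3);                 // simulate a dynamic settings update
        System.out.println(throttle.tryAcquire()); // true: one more update may proceed
    }
}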
- * */ - public interface PersistedState { + public interface PersistedState extends Closeable { /** * Returns the current term @@ -497,7 +498,8 @@ default void markLastAcceptedStateAsCommitted() { } } - default void close() {} + default void close() throws IOException { + } } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index d664a3de193e5..20b3637680fb9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -72,6 +72,7 @@ import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -702,7 +703,7 @@ protected void doStop() { } @Override - protected void doClose() { + protected void doClose() throws IOException { final CoordinationState coordinationState = this.coordinationState.get(); if (coordinationState != null) { // This looks like a race that might leak an unclosed CoordinationState if it's created while execution is here, but this method diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java index dff7ae5a2ee03..c1cf496711f92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java @@ -18,11 +18,12 @@ */ package org.elasticsearch.cluster.coordination; +import joptsimple.OptionSet; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.PersistedClusterStateService; import java.io.IOException; import java.nio.file.Path; @@ -48,14 +49,21 @@ public DetachClusterCommand() { @Override - protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { - final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); - final Manifest manifest = manifestMetaDataTuple.v1(); - final MetaData metaData = manifestMetaDataTuple.v2(); + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException { + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state"); + final ClusterState oldClusterState = loadTermAndClusterState(persistedClusterStateService, env).v2(); + final ClusterState newClusterState = ClusterState.builder(oldClusterState) + .metaData(updateMetaData(oldClusterState.metaData())).build(); + terminal.println(Terminal.Verbosity.VERBOSE, + "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]"); confirm(terminal, CONFIRMATION_MSG); - writeNewMetaData(terminal, manifest, updateCurrentTerm(), metaData, updateMetaData(metaData), dataPaths); + try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) { + 
writer.writeFullStateAndCommit(updateCurrentTerm(), newClusterState); + } terminal.println(NODE_DETACHED_MSG); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 800269520e366..754dacab0c003 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -26,42 +26,82 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.metadata.Manifest; -import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.indices.IndicesModule; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); protected static final String DELIMITER = "------------------------------------------------------------------------\n"; - static final String STOP_WARNING_MSG = DELIMITER + "\n" + " WARNING: Elasticsearch MUST be stopped before running this tool." 
+ "\n"; protected static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; - static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; - static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; - protected static final String GLOBAL_GENERATION_MISSING_MSG = - "no metadata is referenced from the manifest file, cluster has never been bootstrapped?"; - static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; - static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; protected static final String ABORTED_BY_USER_MSG = "aborted by user"; + static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; + static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?"; + protected static final String CS_MISSING_MSG = + "cluster state is empty, cluster has never been bootstrapped?"; + + protected static final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry( + Stream.of(ClusterModule.getNamedXWriteables().stream(), IndicesModule.getNamedXContents().stream()) + .flatMap(Function.identity()) + .collect(Collectors.toList())); public ElasticsearchNodeCommand(String description) { super(description); } - protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException { + public static PersistedClusterStateService createPersistedClusterStateService(Settings settings, Path[] dataPaths) throws IOException { + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(dataPaths); + if (nodeMetaData == null) { + throw new ElasticsearchException(NO_NODE_METADATA_FOUND_MSG); + } + + String nodeId = nodeMetaData.nodeId(); + return new PersistedClusterStateService(dataPaths, nodeId, namedXContentRegistry, BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true); + } + + public static ClusterState clusterState(Environment environment, PersistedClusterStateService.OnDiskState onDiskState) { + return ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(environment.settings())) + .version(onDiskState.lastAcceptedVersion) + .metaData(onDiskState.metaData) + .build(); + } + + public static Tuple loadTermAndClusterState(PersistedClusterStateService psf, + Environment env) throws IOException { + final PersistedClusterStateService.OnDiskState bestOnDiskState = psf.loadBestOnDiskState(); + if (bestOnDiskState.empty()) { + throw new ElasticsearchException(CS_MISSING_MSG); + } + return Tuple.tuple(bestOnDiskState.currentTerm, clusterState(env, bestOnDiskState)); + } + + protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException, UserException { terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, env, Files::exists)) { final Path[] dataPaths = @@ -69,32 +109,12 @@ protected void processNodePaths(Terminal terminal, OptionSet options, Environmen if (dataPaths.length == 0) { throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); } - processNodePaths(terminal, dataPaths, env); + processNodePaths(terminal, dataPaths, options, env); } catch 
(LockObtainFailedException e) { throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e); } } - protected Tuple loadMetaData(Terminal terminal, Path[] dataPaths) throws IOException { - terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, dataPaths); - - if (manifest == null) { - throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); - } - if (manifest.isGlobalGenerationMissing()) { - throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); - } - terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); - final MetaData metaData = MetaData.FORMAT_PRESERVE_CUSTOMS.loadGeneration( - logger, NamedXContentRegistry.EMPTY, manifest.getGlobalGeneration(), dataPaths); - if (metaData == null) { - throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); - } - - return Tuple.tuple(manifest, metaData); - } - protected void confirm(Terminal terminal, String msg) { terminal.println(msg); String text = terminal.readText("Confirm [y/N] "); @@ -104,7 +124,7 @@ protected void confirm(Terminal terminal, String msg) { } @Override - protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + public final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { terminal.println(STOP_WARNING_MSG); if (validateBeforeLock(terminal, env)) { processNodePaths(terminal, options, env); @@ -126,44 +146,11 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { * Process the paths. Locks for the paths is held during this method invocation. * @param terminal the terminal to use for messages * @param dataPaths the paths of the node to process + * @param options the command line options * @param env the env of the node to process */ - protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException; - - - protected void writeNewMetaData(Terminal terminal, Manifest oldManifest, long newCurrentTerm, - MetaData oldMetaData, MetaData newMetaData, Path[] dataPaths) { - long newGeneration; - try { - terminal.println(Terminal.Verbosity.VERBOSE, - "[clusterUUID = " + oldMetaData.clusterUUID() + ", committed = " + oldMetaData.clusterUUIDCommitted() + "] => " + - "[clusterUUID = " + newMetaData.clusterUUID() + ", committed = " + newMetaData.clusterUUIDCommitted() + "]"); - terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is " + newMetaData.coordinationMetaData()); - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); - newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); - Manifest newManifest = new Manifest(newCurrentTerm, oldManifest.getClusterStateVersion(), newGeneration, - oldManifest.getIndexGenerations()); - terminal.println(Terminal.Verbosity.VERBOSE, "New manifest is " + newManifest); - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); - Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); - } catch (Exception e) { - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); - MetaData.FORMAT.cleanupOldFiles(oldManifest.getGlobalGeneration(), dataPaths); - throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); - } - // if cleaning old files fail, we still succeeded. 
- try { - cleanUpOldMetaData(terminal, dataPaths, newGeneration); - } catch (Exception e) { - terminal.println(Terminal.Verbosity.SILENT, - "Warning: Cleaning up old metadata failed, but operation was otherwise successful (message: " + e.getMessage() + ")"); - } - } - - protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newGeneration) { - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); - MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); - } + protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) + throws IOException, UserException; protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index ff054e71eee3a..7c96f631be6dd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -41,6 +41,8 @@ public NodeToolCli() { subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); subcommands.put("override-version", new OverrideNodeVersionCommand()); + subcommands.put("remove-settings", new RemoveSettingsCommand()); + subcommands.put("remove-customs", new RemoveCustomsCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java new file mode 100644 index 0000000000000..923c6721f9131 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.coordination; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.PersistedClusterStateService; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; + +public class RemoveCustomsCommand extends ElasticsearchNodeCommand { + + static final String CUSTOMS_REMOVED_MSG = "Customs were successfully removed from the cluster state"; + static final String CONFIRMATION_MSG = + DELIMITER + + "\n" + + "You should only run this tool if you have broken custom metadata in the\n" + + "cluster state that prevents the cluster state from being loaded.\n" + + "This tool can cause data loss and its use should be your last resort.\n" + + "\n" + + "Do you want to proceed?\n"; + + private final OptionSpec arguments; + + public RemoveCustomsCommand() { + super("Removes custom metadata from the cluster state"); + arguments = parser.nonOptions("custom metadata names"); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) + throws IOException, UserException { + final List customsToRemove = arguments.values(options); + if (customsToRemove.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "Must supply at least one custom metadata name to remove"); + } + + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state"); + final Tuple termAndClusterState = loadTermAndClusterState(persistedClusterStateService, env); + final ClusterState oldClusterState = termAndClusterState.v2(); + terminal.println(Terminal.Verbosity.VERBOSE, "custom metadata names: " + oldClusterState.metaData().customs().keys()); + final MetaData.Builder metaDataBuilder = MetaData.builder(oldClusterState.metaData()); + for (String customToRemove : customsToRemove) { + boolean matched = false; + for (ObjectCursor customKeyCur : oldClusterState.metaData().customs().keys()) { + final String customKey = customKeyCur.value; + if (Regex.simpleMatch(customToRemove, customKey)) { + metaDataBuilder.removeCustom(customKey); + if (matched == false) { + terminal.println("The following customs will be removed:"); + } + matched = true; + terminal.println(customKey); + } + } + if (matched == false) { + throw new UserException(ExitCodes.USAGE, + "No custom metadata matching [" + customToRemove + "] were found on this node"); + } + } + final ClusterState newClusterState = ClusterState.builder(oldClusterState).metaData(metaDataBuilder.build()).build(); + terminal.println(Terminal.Verbosity.VERBOSE, + "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]"); + + confirm(terminal, CONFIRMATION_MSG); + + try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(termAndClusterState.v1(), newClusterState); + } + + terminal.println(CUSTOMS_REMOVED_MSG); + } +} diff --git 
a/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java new file mode 100644 index 0000000000000..5781e2bdf5a54 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.PersistedClusterStateService; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; + +public class RemoveSettingsCommand extends ElasticsearchNodeCommand { + + static final String SETTINGS_REMOVED_MSG = "Settings were successfully removed from the cluster state"; + static final String CONFIRMATION_MSG = + DELIMITER + + "\n" + + "You should only run this tool if you have incompatible settings in the\n" + + "cluster state that prevent the cluster from forming.\n" + + "This tool can cause data loss and its use should be your last resort.\n" + + "\n" + + "Do you want to proceed?\n"; + + private final OptionSpec arguments; + + public RemoveSettingsCommand() { + super("Removes persistent settings from the cluster state"); + arguments = parser.nonOptions("setting names"); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) + throws IOException, UserException { + final List settingsToRemove = arguments.values(options); + if (settingsToRemove.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "Must supply at least one setting to remove"); + } + + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state"); + final Tuple termAndClusterState = loadTermAndClusterState(persistedClusterStateService, env); + final ClusterState oldClusterState = termAndClusterState.v2(); + final Settings oldPersistentSettings = oldClusterState.metaData().persistentSettings(); + terminal.println(Terminal.Verbosity.VERBOSE, "persistent settings: " + oldPersistentSettings); + final Settings.Builder newPersistentSettingsBuilder = Settings.builder().put(oldPersistentSettings); + for (String 
settingToRemove : settingsToRemove) { + boolean matched = false; + for (String settingKey : oldPersistentSettings.keySet()) { + if (Regex.simpleMatch(settingToRemove, settingKey)) { + newPersistentSettingsBuilder.remove(settingKey); + if (matched == false) { + terminal.println("The following settings will be removed:"); + } + matched = true; + terminal.println(settingKey + ": " + oldPersistentSettings.get(settingKey)); + } + } + if (matched == false) { + throw new UserException(ExitCodes.USAGE, + "No persistent cluster settings matching [" + settingToRemove + "] were found on this node"); + } + } + final ClusterState newClusterState = ClusterState.builder(oldClusterState) + .metaData(MetaData.builder(oldClusterState.metaData()).persistentSettings(newPersistentSettingsBuilder.build()).build()) + .build(); + terminal.println(Terminal.Verbosity.VERBOSE, + "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]"); + + confirm(terminal, CONFIRMATION_MSG); + + try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(termAndClusterState.v1(), newClusterState); + } + + terminal.println(SETTINGS_REMOVED_MSG); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index 05bc0116c13c6..30dfe5f1722d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -18,19 +18,17 @@ */ package org.elasticsearch.cluster.coordination; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import joptsimple.OptionSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.node.Node; import java.io.IOException; @@ -40,8 +38,6 @@ public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { - private static final Logger logger = LogManager.getLogger(UnsafeBootstrapMasterCommand.class); - static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; static final String CONFIRMATION_MSG = @@ -58,8 +54,6 @@ public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node"; - static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?"; - static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting voting configuration is empty, cluster has never been bootstrapped?"; @@ -83,49 +77,54 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { return true; } - protected void 
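The remove-settings and remove-customs subcommands registered in NodeToolCli above both select keys with the same '*'-style wildcard matching (the patch uses org.elasticsearch.common.regex.Regex.simpleMatch), so a pattern such as cluster.routing.allocation.* strips a whole settings namespace. The snippet below is a self-contained approximation of that matching for illustration only; it is not the patch's implementation, and the setting names are examples.

import java.util.List;
import java.util.regex.Pattern;

class WildcardSelectDemo {
    // Translate a '*'-only glob into a regex and test a key against it.
    static boolean simpleMatch(String glob, String key) {
        String regex = "\\Q" + glob.replace("*", "\\E.*\\Q") + "\\E";
        return Pattern.matches(regex, key);
    }

    public static void main(String[] args) {
        List<String> persistentSettings = List.of(
            "cluster.routing.allocation.enable",
            "cluster.routing.allocation.exclude._name",
            "xpack.monitoring.collection.enabled");
        String pattern = "cluster.routing.allocation.*"; // what a user might pass on the command line
        persistentSettings.stream()
            .filter(key -> simpleMatch(pattern, key))
            .forEach(key -> System.out.println("would remove: " + key));
    }
}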
processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { - terminal.println(Terminal.Verbosity.VERBOSE, "Loading node metadata"); - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, dataPaths); - if (nodeMetaData == null) { - throw new ElasticsearchException(NO_NODE_METADATA_FOUND_MSG); - } + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException { + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); - String nodeId = nodeMetaData.nodeId(); - terminal.println(Terminal.Verbosity.VERBOSE, "Current nodeId is " + nodeId); + final Tuple state = loadTermAndClusterState(persistedClusterStateService, env); + final ClusterState oldClusterState = state.v2(); + + final MetaData metaData = oldClusterState.metaData(); - final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); - final Manifest manifest = manifestMetaDataTuple.v1(); - final MetaData metaData = manifestMetaDataTuple.v2(); final CoordinationMetaData coordinationMetaData = metaData.coordinationMetaData(); if (coordinationMetaData == null || - coordinationMetaData.getLastCommittedConfiguration() == null || - coordinationMetaData.getLastCommittedConfiguration().isEmpty()) { + coordinationMetaData.getLastCommittedConfiguration() == null || + coordinationMetaData.getLastCommittedConfiguration().isEmpty()) { throw new ElasticsearchException(EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG); } terminal.println(String.format(Locale.ROOT, CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, coordinationMetaData.term(), - metaData.version())); - - confirm(terminal, CONFIRMATION_MSG); + metaData.version())); CoordinationMetaData newCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) - .clearVotingConfigExclusions() - .lastAcceptedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) - .lastCommittedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) - .build(); + .clearVotingConfigExclusions() + .lastAcceptedConfiguration(new CoordinationMetaData.VotingConfiguration( + Collections.singleton(persistedClusterStateService.getNodeId()))) + .lastCommittedConfiguration(new CoordinationMetaData.VotingConfiguration( + Collections.singleton(persistedClusterStateService.getNodeId()))) + .build(); Settings persistentSettings = Settings.builder() - .put(metaData.persistentSettings()) - .put(UNSAFE_BOOTSTRAP.getKey(), true) - .build(); + .put(metaData.persistentSettings()) + .put(UNSAFE_BOOTSTRAP.getKey(), true) + .build(); MetaData newMetaData = MetaData.builder(metaData) - .clusterUUID(MetaData.UNKNOWN_CLUSTER_UUID) - .generateClusterUuidIfNeeded() - .clusterUUIDCommitted(true) - .persistentSettings(persistentSettings) - .coordinationMetaData(newCoordinationMetaData) - .build(); - - writeNewMetaData(terminal, manifest, manifest.getCurrentTerm(), metaData, newMetaData, dataPaths); + .clusterUUID(MetaData.UNKNOWN_CLUSTER_UUID) + .generateClusterUuidIfNeeded() + .clusterUUIDCommitted(true) + .persistentSettings(persistentSettings) + .coordinationMetaData(newCoordinationMetaData) + .build(); + + final ClusterState newClusterState = ClusterState.builder(oldClusterState) + .metaData(newMetaData).build(); + + terminal.println(Terminal.Verbosity.VERBOSE, + "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + 
"]"); + + confirm(terminal, CONFIRMATION_MSG); + + try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(state.v1(), newClusterState); + } terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1ed7fb04407f1..ded77ce4287d6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -649,7 +649,7 @@ private static IndexService validateActiveShardCountAndCreateIndexService(String "]: cannot be greater than number of shard copies [" + (tmpImd.getNumberOfReplicas() + 1) + "]"); } - return indicesService.createIndex(tmpImd, Collections.emptyList()); + return indicesService.createIndex(tmpImd, Collections.emptyList(), false); } private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 5efd4b6eae8bc..c6149682a203a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -140,7 +140,7 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable get(Object key) { assert key instanceof String : "key must be a string but was: " + key.getClass(); - return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); + return (settings, version, scriptService) -> new BM25Similarity(); } // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 5271d401d2410..140423577751f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -143,7 +143,7 @@ ClusterState executeRefresh(final ClusterState currentState, final List The list element type. * @return A comma-separated string of the first few elements. */ - static String firstListElementsToCommaDelimitedString(List elements, Function formatter, boolean isDebugEnabled) { + public static String firstListElementsToCommaDelimitedString(List elements, Function formatter, + boolean isDebugEnabled) { final int maxNumberOfElements = 10; if (isDebugEnabled || elements.size() <= maxNumberOfElements) { return elements.stream().map(formatter).collect(Collectors.joining(", ")); diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index e36a8fa45d581..37e89389bba92 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -164,6 +164,19 @@ public void writeTo(StreamOutput out) throws IOException { */ public abstract long nextRoundingValue(long value); + /** + * How "offset" this rounding is from the traditional "start" of the period. 
+ * @deprecated We're in the process of abstracting offset *into* Rounding + * so keep any usage to migratory shims + */ + @Deprecated + public abstract long offset(); + + /** + * Strip the {@code offset} from these bounds. + */ + public abstract Rounding withoutOffset(); + @Override public abstract boolean equals(Object obj); @@ -420,6 +433,16 @@ public long nextRoundingValue(long utcMillis) { } } + @Override + public long offset() { + return 0; + } + + @Override + public Rounding withoutOffset() { + return this; + } + @Override public int hashCode() { return Objects.hash(unit, timeZone); @@ -546,6 +569,16 @@ public long nextRoundingValue(long time) { .toInstant().toEpochMilli(); } + @Override + public long offset() { + return 0; + } + + @Override + public Rounding withoutOffset() { + return this; + } + @Override public int hashCode() { return Objects.hash(interval, timeZone); @@ -607,8 +640,17 @@ public long round(long value) { @Override public long nextRoundingValue(long value) { - // This isn't needed by the current users. We'll implement it when we migrate other users to it. - throw new UnsupportedOperationException("not yet supported"); + return delegate.nextRoundingValue(value - offset) + offset; + } + + @Override + public long offset() { + return offset; + } + + @Override + public Rounding withoutOffset() { + return delegate; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java b/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java index 78e81925275d7..01315d3e9848c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java @@ -68,6 +68,11 @@ public GeoBoundingBox(StreamInput input) throws IOException { this.bottomRight = input.readGeoPoint(); } + public boolean isUnbounded() { + return Double.isNaN(topLeft.lon()) || Double.isNaN(topLeft.lat()) + || Double.isNaN(bottomRight.lon()) || Double.isNaN(bottomRight.lat()); + } + public GeoPoint topLeft() { return topLeft; } @@ -120,6 +125,26 @@ public XContentBuilder toXContentFragment(XContentBuilder builder, boolean build return builder; } + /** + * If the bounding box crosses the date-line (left greater-than right) then the + * longitude of the point need only to be higher than the left or lower + * than the right. Otherwise, it must be both. 
+ * + * @param lon the longitude of the point + * @param lat the latitude of the point + * @return whether the point (lon, lat) is in the specified bounding box + */ + public boolean pointInBounds(double lon, double lat) { + if (lat >= bottom() && lat <= top()) { + if (left() <= right()) { + return lon >= left() && lon <= right(); + } else { + return lon >= left() || lon <= right(); + } + } + return false; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeGeoPoint(topLeft); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 359b7781b895f..d33f90043b1d5 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -93,6 +93,7 @@ public static boolean isValidLongitude(double longitude) { return true; } + /** * Calculate the width (in meters) of geohash cells at a specific level * @param level geohash level must be greater or equal to zero diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 928de4a0cb5e3..f8ea133c0ed63 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -72,7 +72,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.DanglingIndicesState; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.gateway.IncrementalClusterStateWriter; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -102,9 +102,9 @@ import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ProxyConnectionStrategy; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteConnectionStrategy; -import org.elasticsearch.transport.ProxyConnectionStrategy; import org.elasticsearch.transport.SniffConnectionStrategy; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.watcher.ResourceWatcherService; @@ -199,7 +199,9 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING, IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING, + IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING, MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING, MetaData.SETTING_READ_ONLY_SETTING, MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING, MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, @@ -231,7 +233,7 @@ public void apply(Settings value, Settings current, Settings previous) { GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING, GatewayService.RECOVER_AFTER_NODES_SETTING, GatewayService.RECOVER_AFTER_TIME_SETTING, - IncrementalClusterStateWriter.SLOW_WRITE_LOGGING_THRESHOLD, + PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD, NetworkModule.HTTP_DEFAULT_TYPE_SETTING, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING, 
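The pointInBounds logic added to GeoBoundingBox above handles boxes that cross the date line (left greater than right). A standalone restatement with example coordinates makes the two branches concrete; this is an illustration, not code from the patch.

class DatelineBoundsDemo {
    static boolean pointInBounds(double lon, double lat,
                                 double top, double left, double bottom, double right) {
        if (lat >= bottom && lat <= top) {
            if (left <= right) {
                return lon >= left && lon <= right; // normal box: both conditions must hold
            } else {
                return lon >= left || lon <= right; // box wraps the date line: either side is enough
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // A box from lon 170 (left) to lon -170 (right) straddles the antimeridian.
        System.out.println(pointInBounds( 175, 0, 10, 170, -10, -170)); // true
        System.out.println(pointInBounds(-175, 0, 10, 170, -10, -170)); // true
        System.out.println(pointInBounds(   0, 0, 10, 170, -10, -170)); // false
    }
}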
NetworkModule.HTTP_TYPE_SETTING, @@ -295,7 +297,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, ProxyConnectionStrategy.REMOTE_CLUSTER_ADDRESSES, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, - ProxyConnectionStrategy.INCLUDE_SERVER_NAME, + ProxyConnectionStrategy.SERVER_NAME, SniffConnectionStrategy.REMOTE_CLUSTERS_PROXY, SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS, SniffConnectionStrategy.REMOTE_NODE_CONNECTIONS, diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknown.java b/server/src/main/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknown.java new file mode 100644 index 0000000000000..c24f74ef814a7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknown.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.apache.lucene.search.spell.LevenshteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.collect.Tuple; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static java.util.stream.Collectors.toList; + +public class SuggestingErrorOnUnknown implements ErrorOnUnknown { + @Override + public String errorMessage(String parserName, String unknownField, Iterable candidates) { + String message = String.format(Locale.ROOT, "[%s] unknown field [%s]", parserName, unknownField); + // TODO it'd be nice to combine this with BaseRestHandler's implementation. 
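The method body that follows ranks the candidate field names by string similarity and appends a "did you mean" hint to the error message. A self-contained sketch of the same idea is below; it uses a hand-rolled edit distance and a similarity cut-off in the same spirit as the real class (which delegates to Lucene's LevenshteinDistance), and the field names in the example are hypothetical:

    import java.util.ArrayList;
    import java.util.List;

    class DidYouMeanSketch {
        // Plain Levenshtein distance; the real class uses Lucene's LevenshteinDistance,
        // whose normalisation may differ slightly from the one used here.
        static int levenshtein(String a, String b) {
            int[] prev = new int[b.length() + 1];
            int[] curr = new int[b.length() + 1];
            for (int j = 0; j <= b.length(); j++) prev[j] = j;
            for (int i = 1; i <= a.length(); i++) {
                curr[0] = i;
                for (int j = 1; j <= b.length(); j++) {
                    int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
                    curr[j] = Math.min(Math.min(curr[j - 1] + 1, prev[j] + 1), prev[j - 1] + cost);
                }
                int[] tmp = prev; prev = curr; curr = tmp;
            }
            return prev[b.length()];
        }

        // Keep only candidates that are "close enough" to the unknown field name.
        static List<String> suggest(String unknownField, List<String> candidates) {
            List<String> close = new ArrayList<>();
            for (String candidate : candidates) {
                float similarity = 1.0f - (float) levenshtein(unknownField, candidate)
                    / Math.min(unknownField.length(), candidate.length());
                if (similarity > 0.5f) {
                    close.add(candidate);
                }
            }
            return close;
        }

        public static void main(String[] args) {
            System.out.println(suggest("sizee", List.of("size", "sort", "from"))); // prints [size]
        }
    }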
+ LevenshteinDistance ld = new LevenshteinDistance(); + final List> scored = new ArrayList<>(); + for (String candidate : candidates) { + float distance = ld.getDistance(unknownField, candidate); + if (distance > 0.5f) { + scored.add(new Tuple<>(distance, candidate)); + } + } + if (scored.isEmpty()) { + return message; + } + CollectionUtil.timSort(scored, (a, b) -> { + // sort by distance in reverse order, then parameter name for equal distances + int compare = a.v1().compareTo(b.v1()); + if (compare != 0) { + return -compare; + } + return a.v2().compareTo(b.v2()); + }); + List keys = scored.stream().map(Tuple::v2).collect(toList()); + StringBuilder builder = new StringBuilder(message).append(" did you mean "); + if (keys.size() == 1) { + builder.append("[").append(keys.get(0)).append("]"); + } else { + builder.append("any of ").append(keys.toString()); + } + builder.append("?"); + return builder.toString(); + } + + @Override + public int priority() { + return 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index ec5b4fe43c8fe..9164f6f23a8cd 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; @@ -301,7 +302,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce ensureNoShardData(nodePaths); } - this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); + this.nodeMetaData = loadNodeMetaData(settings, logger, nodePaths); success = true; } finally { @@ -381,6 +382,13 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings // determine folders to move and check that there are no extra files/folders final Set folderNames = new HashSet<>(); + final Set expectedFolderNames = new HashSet<>(Arrays.asList( + + // node state directory, containing MetaDataStateFormat-based node metadata as well as cluster state + MetaDataStateFormat.STATE_DIR_NAME, + + // indices + INDICES_FOLDER)); try (DirectoryStream stream = Files.newDirectoryStream(legacyNodePath.path)) { for (Path subFolderPath : stream) { @@ -388,8 +396,7 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) { // ignore } else if (FileSystemUtils.isAccessibleDirectory(subFolderPath, logger)) { - if (fileName.equals(INDICES_FOLDER) == false && // indices folder - fileName.equals(MetaDataStateFormat.STATE_DIR_NAME) == false) { // global metadata & node state folder + if (expectedFolderNames.contains(fileName) == false) { throw new IllegalStateException("unexpected folder encountered during data folder upgrade: " + subFolderPath); } @@ -407,7 +414,7 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings } } - assert Sets.difference(folderNames, Sets.newHashSet(INDICES_FOLDER, MetaDataStateFormat.STATE_DIR_NAME)).isEmpty() : + assert Sets.difference(folderNames, expectedFolderNames).isEmpty() : "expected indices and/or state dir folder but was " + folderNames; upgradeActions.add(() -> { 
@@ -422,7 +429,7 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings } // now do the actual upgrade. start by upgrading the node metadata file before moving anything, since a downgrade in an // intermediate state would be pretty disastrous - loadOrCreateNodeMetaData(settings, logger, legacyNodeLock.getNodePaths()); + loadNodeMetaData(settings, logger, legacyNodeLock.getNodePaths()); for (CheckedRunnable upgradeAction : upgradeActions) { upgradeAction.run(); } @@ -491,36 +498,36 @@ private void maybeLogHeapDetails() { /** * scans the node paths and loads existing metaData file. If not found a new meta data will be generated - * and persisted into the nodePaths */ - private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger, - NodePath... nodePaths) throws IOException { + private static NodeMetaData loadNodeMetaData(Settings settings, Logger logger, + NodePath... nodePaths) throws IOException { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); - - final Set nodeIds = new HashSet<>(); - for (final Path path : paths) { - final NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); - if (metaData != null) { - nodeIds.add(metaData.nodeId()); - } - } - if (nodeIds.size() > 1) { - throw new IllegalStateException( - "data paths " + Arrays.toString(paths) + " belong to multiple nodes with IDs " + nodeIds); - } - - NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); + NodeMetaData metaData = PersistedClusterStateService.nodeMetaData(paths); if (metaData == null) { - assert nodeIds.isEmpty() : nodeIds; - metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); - } else { - assert nodeIds.equals(Collections.singleton(metaData.nodeId())) : nodeIds + " doesn't match " + metaData; - metaData = metaData.upgradeToCurrentVersion(); + // load legacy metadata + final Set nodeIds = new HashSet<>(); + for (final Path path : paths) { + final NodeMetaData oldStyleMetaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + if (oldStyleMetaData != null) { + nodeIds.add(oldStyleMetaData.nodeId()); + } + } + if (nodeIds.size() > 1) { + throw new IllegalStateException( + "data paths " + Arrays.toString(paths) + " belong to multiple nodes with IDs " + nodeIds); + } + // load legacy metadata + final NodeMetaData legacyMetaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); + if (legacyMetaData == null) { + assert nodeIds.isEmpty() : nodeIds; + metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); + } else { + assert nodeIds.equals(Collections.singleton(legacyMetaData.nodeId())) : nodeIds + " doesn't match " + legacyMetaData; + metaData = legacyMetaData; + } } - - // we write again to make sure all paths have the latest state file + metaData = metaData.upgradeToCurrentVersion(); assert metaData.nodeVersion().equals(Version.CURRENT) : metaData.nodeVersion() + " != " + Version.CURRENT; - NodeMetaData.FORMAT.writeAndCleanup(metaData, paths); return metaData; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java index 25b4f79866eaa..bf91f87109a87 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java @@ -18,42 +18,41 @@ */ package 
org.elasticsearch.env; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import joptsimple.OptionParser; import joptsimple.OptionSet; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.gateway.WriteStateException; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.gateway.PersistedClusterStateService; import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; -public class NodeRepurposeCommand extends ElasticsearchNodeCommand { +import static org.elasticsearch.env.NodeEnvironment.INDICES_FOLDER; - private static final Logger logger = LogManager.getLogger(NodeRepurposeCommand.class); +public class NodeRepurposeCommand extends ElasticsearchNodeCommand { static final String ABORTED_BY_USER_MSG = ElasticsearchNodeCommand.ABORTED_BY_USER_MSG; static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG; static final String NO_CLEANUP = "Node has node.data=true -> no clean up necessary"; static final String NO_DATA_TO_CLEAN_UP_FOUND = "No data to clean-up found"; static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; - static final String PRE_V7_MESSAGE = - "No manifest file found. 
If you were previously running this node on Elasticsearch version 6, please proceed.\n" + - "If this node was ever started on Elasticsearch version 7 or higher, it might mean metadata corruption, please abort."; public NodeRepurposeCommand() { super("Repurpose this node to another master/data role, cleaning up any excess persisted data"); @@ -75,17 +74,17 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { } @Override - protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException { assert DiscoveryNode.isDataNode(env.settings()) == false; if (DiscoveryNode.isMasterNode(env.settings()) == false) { - processNoMasterNoDataNode(terminal, dataPaths); + processNoMasterNoDataNode(terminal, dataPaths, env); } else { - processMasterNoDataNode(terminal, dataPaths); + processMasterNoDataNode(terminal, dataPaths, env); } } - private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws IOException { + private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -95,32 +94,36 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths) thro List indexMetaDataPaths = NodeEnvironment.collectIndexMetaDataPaths(nodePaths); Set indexPaths = uniqueParentPaths(shardDataPaths, indexMetaDataPaths); - if (indexPaths.isEmpty()) { + + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + + final MetaData metaData = loadClusterState(terminal, env, persistedClusterStateService).metaData(); + if (indexPaths.isEmpty() && metaData.indices().isEmpty()) { terminal.println(Terminal.Verbosity.NORMAL, NO_DATA_TO_CLEAN_UP_FOUND); return; } - Set indexUUIDs = indexUUIDsFor(indexPaths); - outputVerboseInformation(terminal, nodePaths, indexPaths, indexUUIDs); + final Set indexUUIDs = Sets.union(indexUUIDsFor(indexPaths), + StreamSupport.stream(metaData.indices().values().spliterator(), false) + .map(imd -> imd.value.getIndexUUID()).collect(Collectors.toSet())); + + outputVerboseInformation(terminal, indexPaths, indexUUIDs, metaData); terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetaDataPaths.size())); outputHowToSeeVerboseInformation(terminal); - final Manifest manifest = loadManifest(terminal, dataPaths); - terminal.println("Node is being re-purposed as no-master and no-data. 
Clean-up of index data will be performed."); confirm(terminal, "Do you want to proceed?"); - if (manifest != null) { - rewriteManifest(terminal, manifest, dataPaths); - } - - removePaths(terminal, indexPaths); + removePaths(terminal, indexPaths); // clean-up shard dirs + // clean-up all metadata dirs + MetaDataStateFormat.deleteMetaState(dataPaths); + IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new)); terminal.println("Node successfully repurposed to no-master and no-data."); } - private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws IOException { + private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); @@ -130,9 +133,14 @@ private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws return; } - Set indexPaths = uniqueParentPaths(shardDataPaths); - Set indexUUIDs = indexUUIDsFor(indexPaths); - outputVerboseInformation(terminal, nodePaths, shardDataPaths, indexUUIDs); + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + + final MetaData metaData = loadClusterState(terminal, env, persistedClusterStateService).metaData(); + + final Set indexPaths = uniqueParentPaths(shardDataPaths); + final Set indexUUIDs = indexUUIDsFor(indexPaths); + + outputVerboseInformation(terminal, shardDataPaths, indexUUIDs, metaData); terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size())); outputHowToSeeVerboseInformation(terminal); @@ -140,18 +148,22 @@ private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws terminal.println("Node is being re-purposed as master and no-data. 
Clean-up of shard data will be performed."); confirm(terminal, "Do you want to proceed?"); - removePaths(terminal, shardDataPaths); + removePaths(terminal, shardDataPaths); // clean-up shard dirs terminal.println("Node successfully repurposed to master and no-data."); } - private void outputVerboseInformation(Terminal terminal, NodeEnvironment.NodePath[] nodePaths, - Collection pathsToCleanup, Set indexUUIDs) { + private ClusterState loadClusterState(Terminal terminal, Environment env, PersistedClusterStateService psf) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state"); + return clusterState(env, psf.loadBestOnDiskState()); + } + + private void outputVerboseInformation(Terminal terminal, Collection pathsToCleanup, Set indexUUIDs, MetaData metaData) { if (terminal.isPrintable(Terminal.Verbosity.VERBOSE)) { terminal.println(Terminal.Verbosity.VERBOSE, "Paths to clean up:"); pathsToCleanup.forEach(p -> terminal.println(Terminal.Verbosity.VERBOSE, " " + p.toString())); terminal.println(Terminal.Verbosity.VERBOSE, "Indices affected:"); - indexUUIDs.forEach(uuid -> terminal.println(Terminal.Verbosity.VERBOSE, " " + toIndexName(nodePaths, uuid))); + indexUUIDs.forEach(uuid -> terminal.println(Terminal.Verbosity.VERBOSE, " " + toIndexName(uuid, metaData))); } } @@ -160,17 +172,15 @@ private void outputHowToSeeVerboseInformation(Terminal terminal) { terminal.println("Use -v to see list of paths and indices affected"); } } - private String toIndexName(NodeEnvironment.NodePath[] nodePaths, String uuid) { - Path[] indexPaths = new Path[nodePaths.length]; - for (int i = 0; i < nodePaths.length; i++) { - indexPaths[i] = nodePaths[i].resolve(uuid); - } - try { - IndexMetaData metaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexPaths); - return metaData.getIndex().getName(); - } catch (Exception e) { - return "no name for uuid: " + uuid + ": " + e; + private String toIndexName(String uuid, MetaData metaData) { + if (metaData != null) { + for (ObjectObjectCursor indexMetaData : metaData.indices()) { + if (indexMetaData.value.getIndexUUID().equals(uuid)) { + return indexMetaData.value.getIndex().getName(); + } + } } + return "no name for uuid: " + uuid; } private Set indexUUIDsFor(Set indexPaths) { @@ -186,23 +196,6 @@ static String shardMessage(int shards, int indices) { return "Found " + shards + " shards in " + indices + " indices to clean up"; } - private void rewriteManifest(Terminal terminal, Manifest manifest, Path[] dataPaths) throws WriteStateException { - terminal.println(Terminal.Verbosity.VERBOSE, "Re-writing manifest"); - Manifest newManifest = new Manifest(manifest.getCurrentTerm(), manifest.getClusterStateVersion(), manifest.getGlobalGeneration(), - new HashMap<>()); - Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); - } - - private Manifest loadManifest(Terminal terminal, Path[] dataPaths) throws IOException { - terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest"); - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, dataPaths); - - if (manifest == null) { - terminal.println(Terminal.Verbosity.SILENT, PRE_V7_MESSAGE); - } - return manifest; - } - private void removePaths(Terminal terminal, Collection paths) { terminal.println(Terminal.Verbosity.VERBOSE, "Removing data"); paths.forEach(this::removePath); diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java 
b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index f50bdf081ef85..52c2a9cb366d5 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -19,21 +19,18 @@ package org.elasticsearch.env; import joptsimple.OptionParser; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import joptsimple.OptionSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.gateway.PersistedClusterStateService; import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand { - private static final Logger logger = LogManager.getLogger(OverrideNodeVersionCommand.class); - private static final String TOO_NEW_MESSAGE = DELIMITER + "\n" + @@ -72,10 +69,9 @@ public OverrideNodeVersionCommand() { } @Override - protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException { final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new); - final NodeMetaData nodeMetaData - = new NodeMetaData.NodeMetaDataStateFormat(true).loadLatestState(logger, NamedXContentRegistry.EMPTY, nodePaths); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePaths); if (nodeMetaData == null) { throw new ElasticsearchException(NO_METADATA_MESSAGE); } @@ -93,7 +89,7 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment .replace("V_NEW", nodeMetaData.nodeVersion().toString()) .replace("V_CUR", Version.CURRENT.toString())); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeMetaData.nodeId(), Version.CURRENT), nodePaths); + PersistedClusterStateService.overrideVersion(Version.CURRENT, dataPaths); terminal.println(SUCCESS_MESSAGE); } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index c60f5d6b7d40c..6fdc0b43aaa2d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -22,14 +22,14 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.coordination.CoordinationMetaData; import org.elasticsearch.cluster.coordination.CoordinationState.PersistedState; import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ 
-42,19 +42,32 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.Index; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.node.Node; import org.elasticsearch.plugins.MetaDataUpgrader; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.UnaryOperator; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. * @@ -63,8 +76,7 @@ * ClusterState#metaData()} because it might be stale or incomplete. Master-eligible nodes must perform an election to find a complete and * non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster. */ -public class GatewayMetaState { - private static final Logger logger = LogManager.getLogger(GatewayMetaState.class); +public class GatewayMetaState implements Closeable { // Set by calling start() private final SetOnce persistedState = new SetOnce<>(); @@ -81,49 +93,83 @@ public MetaData getMetaData() { public void start(Settings settings, TransportService transportService, ClusterService clusterService, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, - MetaDataUpgrader metaDataUpgrader) { + MetaDataUpgrader metaDataUpgrader, PersistedClusterStateService persistedClusterStateService) { assert persistedState.get() == null : "should only start once, but already have " + persistedState.get(); - final Tuple manifestClusterStateTuple; - try { - upgradeMetaData(settings, metaStateService, metaDataIndexUpgradeService, metaDataUpgrader); - manifestClusterStateTuple = loadStateAndManifest(ClusterName.CLUSTER_NAME_SETTING.get(settings), metaStateService); - } catch (IOException e) { - throw new ElasticsearchException("failed to load metadata", e); - } + if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { + try { + final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState(); + + MetaData metaData = onDiskState.metaData; + long lastAcceptedVersion = onDiskState.lastAcceptedVersion; + long currentTerm = onDiskState.currentTerm; + + if (onDiskState.empty()) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : + "legacy metadata loader is not needed anymore from v9 onwards"; + final Tuple legacyState = metaStateService.loadFullState(); + if (legacyState.v1().isEmpty() == false) { + metaData = legacyState.v2(); + lastAcceptedVersion = legacyState.v1().getClusterStateVersion(); + currentTerm = 
legacyState.v1().getCurrentTerm(); + } + } - final IncrementalClusterStateWriter incrementalClusterStateWriter - = new IncrementalClusterStateWriter(settings, clusterService.getClusterSettings(), metaStateService, - manifestClusterStateTuple.v1(), - prepareInitialClusterState(transportService, clusterService, manifestClusterStateTuple.v2()), - transportService.getThreadPool()::relativeTimeInMillis); - if (DiscoveryNode.isMasterNode(settings) == false) { - if (DiscoveryNode.isDataNode(settings)) { - // Master-eligible nodes persist index metadata for all indices regardless of whether they hold any shards or not. It's - // vitally important to the safety of the cluster coordination system that master-eligible nodes persist this metadata when - // _accepting_ the cluster state (i.e. before it is committed). This persistence happens on the generic threadpool. - // - // In contrast, master-ineligible data nodes only persist the index metadata for shards that they hold. When all shards of - // an index are moved off such a node the IndicesStore is responsible for removing the corresponding index directory, - // including the metadata, and does so on the cluster applier thread. - // - // This presents a problem: if a shard is unassigned from a node and then reassigned back to it again then there is a race - // between the IndicesStore deleting the index folder and the CoordinationState concurrently trying to write the updated - // metadata into it. We could probably solve this with careful synchronization, but in fact there is no need. The persisted - // state on master-ineligible data nodes is mostly ignored - it's only there to support dangling index imports, which is - // inherently unsafe anyway. Thus we can safely delay metadata writes on master-ineligible data nodes until applying the - // cluster state, which is what this does: - clusterService.addLowPriorityApplier(new GatewayClusterApplier(incrementalClusterStateWriter)); - } + PersistedState persistedState = null; + boolean success = false; + try { + final ClusterState clusterState = prepareInitialClusterState(transportService, clusterService, + ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) + .version(lastAcceptedVersion) + .metaData(upgradeMetaDataForNode(metaData, metaDataIndexUpgradeService, metaDataUpgrader)) + .build()); + if (DiscoveryNode.isMasterNode(settings)) { + persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); + } else { + persistedState = new AsyncLucenePersistedState(settings, transportService.getThreadPool(), + new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState)); + } + if (DiscoveryNode.isDataNode(settings)) { + metaStateService.unreferenceAll(); // unreference legacy files (only keep them for dangling indices functionality) + } else { + metaStateService.deleteAll(); // delete legacy files + } + // write legacy node metadata to prevent accidental downgrades from spawning empty cluster state + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(persistedClusterStateService.getNodeId(), Version.CURRENT), + persistedClusterStateService.getDataPaths()); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(persistedState); + } + } - // Master-ineligible nodes do not need to persist the cluster state when accepting it because they are not in the voting - // configuration, so it's ok if they have a stale or incomplete cluster state when restarted. 
We track the latest cluster state - // in memory instead. - persistedState.set(new InMemoryPersistedState(manifestClusterStateTuple.v1().getCurrentTerm(), manifestClusterStateTuple.v2())); + this.persistedState.set(persistedState); + } catch (IOException e) { + throw new ElasticsearchException("failed to load metadata", e); + } } else { - // Master-ineligible nodes must persist the cluster state when accepting it because they must reload the (complete, fresh) - // last-accepted cluster state when restarted. - persistedState.set(new GatewayPersistedState(incrementalClusterStateWriter)); + final long currentTerm = 0L; + final ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)).build(); + if (persistedClusterStateService.getDataPaths().length > 0) { + // write empty cluster state just so that we have a persistent node id. There is no need to write out global metadata with + // cluster uuid as coordinating-only nodes do not snap into a cluster as they carry no state + try (PersistedClusterStateService.Writer persistenceWriter = persistedClusterStateService.createWriter()) { + persistenceWriter.writeFullStateAndCommit(currentTerm, clusterState); + } catch (IOException e) { + throw new ElasticsearchException("failed to load metadata", e); + } + try { + // delete legacy cluster state files + metaStateService.deleteAll(); + // write legacy node metadata to prevent downgrades from spawning empty cluster state + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(persistedClusterStateService.getNodeId(), Version.CURRENT), + persistedClusterStateService.getDataPaths()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + persistedState.set(new InMemoryPersistedState(currentTerm, clusterState)); } } @@ -140,67 +186,10 @@ ClusterState prepareInitialClusterState(TransportService transportService, Clust } // exposed so it can be overridden by tests - void upgradeMetaData(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, - MetaDataUpgrader metaDataUpgrader) throws IOException { - if (isMasterOrDataNode(settings)) { - try { - final Tuple metaStateAndData = metaStateService.loadFullState(); - final Manifest manifest = metaStateAndData.v1(); - final MetaData metaData = metaStateAndData.v2(); - - // We finished global state validation and successfully checked all indices for backward compatibility - // and found no non-upgradable indices, which means the upgrade can continue. - // Now it's safe to overwrite global and index metadata. 
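As a hedged recap of the role handling in the new start() method above (a summary sketch, not additional behaviour, with made-up enum names): master-eligible nodes persist every accepted state synchronously through the Lucene-backed writer, master-ineligible data nodes wrap the same writer in AsyncLucenePersistedState so writes happen on a dedicated thread, and coordinating-only nodes keep the state in memory and only persist a node id.

    class PersistenceModeSketch {
        enum Role { MASTER_ELIGIBLE, DATA_ONLY, COORDINATING_ONLY }
        enum Mode { LUCENE_SYNC, LUCENE_ASYNC, IN_MEMORY }

        static Mode modeFor(Role role) {
            switch (role) {
                case MASTER_ELIGIBLE: return Mode.LUCENE_SYNC;  // persist before accepting a published state
                case DATA_ONLY:       return Mode.LUCENE_ASYNC; // defer writes to a single dedicated thread
                default:              return Mode.IN_MEMORY;    // coordinating-only: only a node id reaches disk
            }
        }
    }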
- // We don't re-write metadata if it's not upgraded by upgrade plugins, because - // if there is manifest file, it means metadata is properly persisted to all data paths - // if there is no manifest file (upgrade from 6.x to 7.x) metadata might be missing on some data paths, - // but anyway we will re-write it as soon as we receive first ClusterState - final IncrementalClusterStateWriter.AtomicClusterStateWriter writer - = new IncrementalClusterStateWriter.AtomicClusterStateWriter(metaStateService, manifest); - final MetaData upgradedMetaData = upgradeMetaData(metaData, metaDataIndexUpgradeService, metaDataUpgrader); - - final long globalStateGeneration; - if (MetaData.isGlobalStateEquals(metaData, upgradedMetaData) == false) { - globalStateGeneration = writer.writeGlobalState("upgrade", upgradedMetaData); - } else { - globalStateGeneration = manifest.getGlobalGeneration(); - } - - Map indices = new HashMap<>(manifest.getIndexGenerations()); - for (IndexMetaData indexMetaData : upgradedMetaData) { - if (metaData.hasIndexMetaData(indexMetaData) == false) { - final long generation = writer.writeIndex("upgrade", indexMetaData); - indices.put(indexMetaData.getIndex(), generation); - } - } - - final Manifest newManifest = new Manifest(manifest.getCurrentTerm(), manifest.getClusterStateVersion(), - globalStateGeneration, indices); - writer.writeManifestAndCleanup("startup", newManifest); - } catch (Exception e) { - logger.error("failed to read or upgrade local state, exiting...", e); - throw e; - } - } - } - - private static Tuple loadStateAndManifest(ClusterName clusterName, - MetaStateService metaStateService) throws IOException { - final long startNS = System.nanoTime(); - final Tuple manifestAndMetaData = metaStateService.loadFullState(); - final Manifest manifest = manifestAndMetaData.v1(); - - final ClusterState clusterState = ClusterState.builder(clusterName) - .version(manifest.getClusterStateVersion()) - .metaData(manifestAndMetaData.v2()).build(); - - logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS))); - - return Tuple.tuple(manifest, clusterState); - } - - private static boolean isMasterOrDataNode(Settings settings) { - return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings); + MetaData upgradeMetaDataForNode(MetaData metaData, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, + MetaDataUpgrader metaDataUpgrader) { + return upgradeMetaData(metaData, metaDataIndexUpgradeService, metaDataUpgrader); } /** @@ -252,81 +241,274 @@ private static boolean applyPluginUpgraders(ImmutableOpenMap incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm()) { - incrementalClusterStateWriter.setCurrentTerm(event.state().term()); + @Override + public void setLastAcceptedState(ClusterState clusterState) { + synchronized (mutex) { + super.setLastAcceptedState(clusterState); + if (newStateQueued) { + logger.trace("cluster state update already queued (setting cluster state to {})", clusterState.version()); + } else { + logger.trace("queuing cluster state update (setting cluster state to {})", clusterState.version()); + newStateQueued = true; + if (newCurrentTermQueued == false) { + scheduleUpdate(); + } } - - incrementalClusterStateWriter.updateClusterState(event.state()); - incrementalClusterStateWriter.setIncrementalWrite(true); - } catch (WriteStateException e) { - logger.warn("Exception occurred when storing new meta data", e); } } - } + private void scheduleUpdate() { + assert 
Thread.holdsLock(mutex); + assert threadPoolExecutor.getQueue().isEmpty() : "threadPoolExecutor queue not empty"; + threadPoolExecutor.execute(new AbstractRunnable() { - private static class GatewayPersistedState implements PersistedState { + @Override + public void onFailure(Exception e) { + logger.error("Exception occurred when storing new meta data", e); + } + + @Override + public void onRejection(Exception e) { + assert threadPoolExecutor.isShutdown() : "only expect rejections when shutting down"; + } + + @Override + protected void doRun() { + final Long term; + final ClusterState clusterState; + synchronized (mutex) { + if (newCurrentTermQueued) { + term = getCurrentTerm(); + logger.trace("resetting newCurrentTermQueued"); + newCurrentTermQueued = false; + } else { + term = null; + } + if (newStateQueued) { + clusterState = getLastAcceptedState(); + logger.trace("resetting newStateQueued"); + newStateQueued = false; + } else { + clusterState = null; + } + } + // write current term before last accepted state so that it is never below term in last accepted state + if (term != null) { + persistedState.setCurrentTerm(term); + } + if (clusterState != null) { + persistedState.setLastAcceptedState(resetVotingConfiguration(clusterState)); + } + } + }); + } - private final IncrementalClusterStateWriter incrementalClusterStateWriter; + static final CoordinationMetaData.VotingConfiguration staleStateConfiguration = + new CoordinationMetaData.VotingConfiguration(Collections.singleton("STALE_STATE_CONFIG")); - GatewayPersistedState(IncrementalClusterStateWriter incrementalClusterStateWriter) { - this.incrementalClusterStateWriter = incrementalClusterStateWriter; + static ClusterState resetVotingConfiguration(ClusterState clusterState) { + CoordinationMetaData newCoordinationMetaData = CoordinationMetaData.builder(clusterState.coordinationMetaData()) + .lastAcceptedConfiguration(staleStateConfiguration) + .lastCommittedConfiguration(staleStateConfiguration) + .build(); + return ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .coordinationMetaData(newCoordinationMetaData).build()).build(); + } + + @Override + public void close() throws IOException { + try { + ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS); + } finally { + persistedState.close(); + } + } + + boolean allPendingAsyncStatesWritten() { + synchronized (mutex) { + if (newCurrentTermQueued || newStateQueued) { + return false; + } + return threadPoolExecutor.getActiveCount() == 0; + } + } + } + + /** + * Encapsulates the incremental writing of metadata to a {@link PersistedClusterStateService.Writer}. + */ + static class LucenePersistedState implements PersistedState { + + private long currentTerm; + private ClusterState lastAcceptedState; + private final PersistedClusterStateService persistedClusterStateService; + + // As the close method can be concurrently called to the other PersistedState methods, this class has extra protection in place. + private final AtomicReference persistenceWriter = new AtomicReference<>(); + boolean writeNextStateFully; + + LucenePersistedState(PersistedClusterStateService persistedClusterStateService, long currentTerm, ClusterState lastAcceptedState) + throws IOException { + this.persistedClusterStateService = persistedClusterStateService; + this.currentTerm = currentTerm; + this.lastAcceptedState = lastAcceptedState; + // Write the whole state out to be sure it's fresh and using the latest format. 
Called during initialisation, so that + // (1) throwing an IOException is enough to halt the node, and + // (2) the index is currently empty since it was opened with IndexWriterConfig.OpenMode.CREATE + + // In the common case it's actually sufficient to commit() the existing state and not do any indexing. For instance, + // this is true if there's only one data path on this master node, and the commit we just loaded was already written out + // by this version of Elasticsearch. TODO TBD should we avoid indexing when possible? + final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); + try { + writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); + } catch (Exception e) { + try { + writer.close(); + } catch (Exception e2) { + e.addSuppressed(e2); + } + throw e; + } + persistenceWriter.set(writer); } @Override public long getCurrentTerm() { - return incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm(); + return currentTerm; } @Override public ClusterState getLastAcceptedState() { - final ClusterState previousClusterState = incrementalClusterStateWriter.getPreviousClusterState(); - assert previousClusterState.nodes().getLocalNode() != null : "Cluster state is not fully built yet"; - return previousClusterState; + return lastAcceptedState; } @Override public void setCurrentTerm(long currentTerm) { try { - incrementalClusterStateWriter.setCurrentTerm(currentTerm); - } catch (WriteStateException e) { - logger.error(new ParameterizedMessage("Failed to set current term to {}", currentTerm), e); - e.rethrowAsErrorOrUncheckedException(); + if (writeNextStateFully) { + getWriterSafe().writeFullStateAndCommit(currentTerm, lastAcceptedState); + writeNextStateFully = false; + } else { + getWriterSafe().commit(currentTerm, lastAcceptedState.version()); + } + } catch (Exception e) { + handleExceptionOnWrite(e); } + this.currentTerm = currentTerm; } @Override public void setLastAcceptedState(ClusterState clusterState) { try { - incrementalClusterStateWriter.setIncrementalWrite( - incrementalClusterStateWriter.getPreviousClusterState().term() == clusterState.term()); - incrementalClusterStateWriter.updateClusterState(clusterState); - } catch (WriteStateException e) { - logger.error(new ParameterizedMessage("Failed to set last accepted state with version {}", clusterState.version()), e); - e.rethrowAsErrorOrUncheckedException(); + if (writeNextStateFully) { + getWriterSafe().writeFullStateAndCommit(currentTerm, clusterState); + writeNextStateFully = false; + } else { + if (clusterState.term() != lastAcceptedState.term()) { + assert clusterState.term() > lastAcceptedState.term() : clusterState.term() + " vs " + lastAcceptedState.term(); + // In a new currentTerm, we cannot compare the persisted metadata's lastAcceptedVersion to those in the new state, + // so it's simplest to write everything again. + getWriterSafe().writeFullStateAndCommit(currentTerm, clusterState); + } else { + // Within the same currentTerm, we _can_ use metadata versions to skip unnecessary writing. 
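The writeNextStateFully flag seen in these two methods encodes a simple recovery rule: once any write fails, the contents of the on-disk index are suspect, so the next attempt must rewrite the full state rather than an increment. A standalone sketch of that pattern, with hypothetical writeFull/writeIncremental runnables standing in for the real Writer calls:

    class WriteFullyAfterFailureSketch {
        private boolean writeNextStateFully;

        void persist(Runnable writeFull, Runnable writeIncremental) {
            try {
                if (writeNextStateFully) {
                    writeFull.run();
                    writeNextStateFully = false; // back to incremental writes
                } else {
                    writeIncremental.run();
                }
            } catch (RuntimeException e) {
                writeNextStateFully = true;      // on-disk contents are now unknown
                throw e;
            }
        }
    }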
+ getWriterSafe().writeIncrementalStateAndCommit(currentTerm, lastAcceptedState, clusterState); + } + } + } catch (Exception e) { + handleExceptionOnWrite(e); } + + lastAcceptedState = clusterState; } - } + private PersistedClusterStateService.Writer getWriterSafe() { + final PersistedClusterStateService.Writer writer = persistenceWriter.get(); + if (writer == null) { + throw new AlreadyClosedException("persisted state has been closed"); + } + if (writer.isOpen()) { + return writer; + } else { + try { + final PersistedClusterStateService.Writer newWriter = persistedClusterStateService.createWriter(); + if (persistenceWriter.compareAndSet(writer, newWriter)) { + return newWriter; + } else { + assert persistenceWriter.get() == null : "expected no concurrent calls to getWriterSafe"; + newWriter.close(); + throw new AlreadyClosedException("persisted state has been closed"); + } + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } + } + + private void handleExceptionOnWrite(Exception e) { + writeNextStateFully = true; + throw ExceptionsHelper.convertToRuntime(e); + } + @Override + public void close() throws IOException { + IOUtils.close(persistenceWriter.getAndSet(null)); + } + } } diff --git a/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java b/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java index eb0d243d74fab..dea3c629450a3 100644 --- a/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java +++ b/server/src/main/java/org/elasticsearch/gateway/IncrementalClusterStateWriter.java @@ -27,13 +27,11 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -48,14 +46,9 @@ public class IncrementalClusterStateWriter { private static final Logger logger = LogManager.getLogger(IncrementalClusterStateWriter.class); - public static final Setting SLOW_WRITE_LOGGING_THRESHOLD = Setting.timeSetting("gateway.slow_write_logging_threshold", - TimeValue.timeValueSeconds(10), TimeValue.ZERO, Setting.Property.NodeScope, Setting.Property.Dynamic); - private final MetaStateService metaStateService; - // On master-eligible nodes we call updateClusterState under the Coordinator's mutex; on master-ineligible data nodes we call - // updateClusterState on the (unique) cluster applier thread; on other nodes we never call updateClusterState. In all cases there's - // no need to synchronize access to these fields. + // We call updateClusterState on the (unique) cluster applier thread so there's no need to synchronize access to these fields. 
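Stepping back to AsyncLucenePersistedState a little earlier: it coalesces bursts of term and state updates by recording only queued-flags under a mutex and letting a single-threaded executor persist whatever is newest when it runs, writing the term before the state so the persisted current term is never below the term of the persisted state. A stripped-down sketch of that coalescing pattern (the println stands in for the real Lucene write):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class CoalescingWriterSketch {
        private final ExecutorService executor = Executors.newSingleThreadExecutor();
        private final Object mutex = new Object();
        private long latestTerm;
        private boolean updateQueued;

        void setTerm(long term) {
            synchronized (mutex) {
                latestTerm = term;
                if (updateQueued == false) {       // at most one pending write task
                    updateQueued = true;
                    executor.execute(this::flush);
                }
            }
        }

        private void flush() {
            final long term;
            synchronized (mutex) {
                term = latestTerm;                 // always persist the newest value
                updateQueued = false;
            }
            System.out.println("persisting term " + term); // stands in for the Lucene commit
        }
    }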
private Manifest previousManifest; private ClusterState previousClusterState; private final LongSupplier relativeTimeMillisSupplier; @@ -70,8 +63,9 @@ public class IncrementalClusterStateWriter { this.previousClusterState = clusterState; this.relativeTimeMillisSupplier = relativeTimeMillisSupplier; this.incrementalWrite = false; - this.slowWriteLoggingThreshold = SLOW_WRITE_LOGGING_THRESHOLD.get(settings); - clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); + this.slowWriteLoggingThreshold = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(settings); + clusterSettings.addSettingsUpdateConsumer(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD, + this::setSlowWriteLoggingThreshold); } private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { @@ -89,10 +83,6 @@ Manifest getPreviousManifest() { return previousManifest; } - ClusterState getPreviousClusterState() { - return previousClusterState; - } - void setIncrementalWrite(boolean incrementalWrite) { this.incrementalWrite = incrementalWrite; } @@ -206,38 +196,20 @@ static List resolveIndexMetaDataActions(Map pr return actions; } - private static Set getRelevantIndicesOnDataOnlyNode(ClusterState state) { - RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); + // exposed for tests + static Set getRelevantIndices(ClusterState state) { + assert state.nodes().getLocalNode().isDataNode(); + final RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); if (newRoutingNode == null) { throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); } - Set indices = new HashSet<>(); - for (ShardRouting routing : newRoutingNode) { + final Set indices = new HashSet<>(); + for (final ShardRouting routing : newRoutingNode) { indices.add(routing.index()); } return indices; } - private static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { - Set relevantIndices = new HashSet<>(); - // we have to iterate over the metadata to make sure we also capture closed indices - for (IndexMetaData indexMetaData : state.metaData()) { - relevantIndices.add(indexMetaData.getIndex()); - } - return relevantIndices; - } - - // exposed for tests - static Set getRelevantIndices(ClusterState state) { - if (state.nodes().getLocalNode().isMasterNode()) { - return getRelevantIndicesForMasterEligibleNode(state); - } else if (state.nodes().getLocalNode().isDataNode()) { - return getRelevantIndicesOnDataOnlyNode(state); - } else { - return Collections.emptySet(); - } - } - /** * Action to perform with index metadata. */ diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 3ce3f8918a190..0fb631813cbb2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -68,7 +68,7 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon * meta state with globalGeneration -1 and empty meta data is returned. * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. 
*/ - Tuple loadFullState() throws IOException { + public Tuple loadFullState() throws IOException { final Manifest manifest = MANIFEST_FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); if (manifest == null) { return loadFullStateBWC(); @@ -184,17 +184,6 @@ List loadIndicesStates(Predicate excludeIndexPathIdsPredi return indexMetaDataList; } - /** - * Loads Manifest file from disk, returns Manifest.empty() if there is no manifest file. - */ - public Manifest loadManifestOrEmpty() throws IOException { - Manifest manifest = MANIFEST_FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); - if (manifest == null) { - manifest = Manifest.empty(); - } - return manifest; - } - /** * Loads the global state, *without* index state, see {@link #loadFullState()} for that. */ @@ -276,28 +265,26 @@ public void cleanupIndex(Index index, long currentGeneration) { } /** - * Writes index metadata and updates manifest file accordingly. - * Used by tests. + * Creates empty cluster state file on disk, deleting global metadata and unreferencing all index metadata + * (only used for dangling indices at that point). */ - public void writeIndexAndUpdateManifest(String reason, IndexMetaData metaData) throws IOException { - long generation = writeIndex(reason, metaData); - Manifest manifest = loadManifestOrEmpty(); - Map indices = new HashMap<>(manifest.getIndexGenerations()); - indices.put(metaData.getIndex(), generation); - manifest = new Manifest(manifest.getCurrentTerm(), manifest.getClusterStateVersion(), manifest.getGlobalGeneration(), indices); - writeManifestAndCleanup(reason, manifest); - cleanupIndex(metaData.getIndex(), generation); + public void unreferenceAll() throws IOException { + MANIFEST_FORMAT.writeAndCleanup(Manifest.empty(), nodeEnv.nodeDataPaths()); // write empty file so that indices become unreferenced + META_DATA_FORMAT.cleanupOldFiles(Long.MAX_VALUE, nodeEnv.nodeDataPaths()); } /** - * Writes global metadata and updates manifest file accordingly. - * Used by tests. + * Removes manifest file, global metadata and all index metadata */ - public void writeGlobalStateAndUpdateManifest(String reason, MetaData metaData) throws IOException { - long generation = writeGlobalState(reason, metaData); - Manifest manifest = loadManifestOrEmpty(); - manifest = new Manifest(manifest.getCurrentTerm(), manifest.getClusterStateVersion(), generation, manifest.getIndexGenerations()); - writeManifestAndCleanup(reason, manifest); - cleanupGlobalState(generation); + public void deleteAll() throws IOException { + // To ensure that the metadata is never reimported by loadFullStateBWC in case where the deletions here fail mid-way through, + // we first write an empty manifest file so that the indices become unreferenced, then clean up the indices, and only then delete + // the manifest file. 
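The ordering spelled out in that comment is a crash-safety argument: at every intermediate point the remaining files are either already unreferenced or already gone, so a failure part-way through can never leave metadata that loadFullStateBWC would re-import. A small illustration of the same unreference-then-delete discipline using plain files (hypothetical paths, not the real manifest format):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class UnreferenceThenDeleteSketch {
        static void deleteAll(Path manifest, Path... dataFiles) throws IOException {
            Files.write(manifest, new byte[0]); // step 1: the "manifest" no longer references anything
            for (Path dataFile : dataFiles) {
                Files.deleteIfExists(dataFile); // step 2: a crash here leaves only unreferenced files behind
            }
            Files.deleteIfExists(manifest);     // step 3: finally drop the empty manifest itself
        }
    }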
+ unreferenceAll(); + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + // delete meta state directories of indices + MetaDataStateFormat.deleteMetaState(nodeEnv.resolveIndexFolder(indexFolderName)); + } + MANIFEST_FORMAT.cleanupOldFiles(Long.MAX_VALUE, nodeEnv.nodeDataPaths()); // finally delete manifest } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java new file mode 100644 index 0000000000000..ffef170754075 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -0,0 +1,842 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gateway; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.analysis.core.KeywordAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexNotFoundException; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; 
+import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.index.Index; + +import java.io.Closeable; +import java.io.FilterOutputStream; +import java.io.IOError; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.IntPredicate; +import java.util.function.LongSupplier; +import java.util.function.Supplier; + +/** + * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by master-eligible nodes + * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any + * documents that have not changed. The index has the following fields: + * + * +------------------------------+-----------------------------+----------------------------------------------+ + * | "type" (string field) | "index_uuid" (string field) | "data" (stored binary field in SMILE format) | + * +------------------------------+-----------------------------+----------------------------------------------+ + * | GLOBAL_TYPE_NAME == "global" | (omitted) | Global metadata | + * | INDEX_TYPE_NAME == "index" | Index UUID | Index metadata | + * +------------------------------+-----------------------------+----------------------------------------------+ + * + * Additionally each commit has the following user data: + * + * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ + * | Key symbol | Key literal | Value | + * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ + * | CURRENT_TERM_KEY | "current_term" | Node's "current" term (≥ last-accepted term and the terms of all sent joins) | + * | LAST_ACCEPTED_VERSION_KEY | "last_accepted_version" | The cluster state version corresponding with the persisted metadata | + * | NODE_ID_KEY | "node_id" | The (persistent) ID of the node that wrote this metadata | + * | NODE_VERSION_KEY | "node_version" | The (ID of the) version of the node that wrote this metadata | + * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ + * + * (the last-accepted term is recorded in MetaData → CoordinationMetaData so does not need repeating here) + */ +public class PersistedClusterStateService { + private static final Logger logger = LogManager.getLogger(PersistedClusterStateService.class); + private static final String CURRENT_TERM_KEY = "current_term"; + private static final String LAST_ACCEPTED_VERSION_KEY = "last_accepted_version"; + private static final String NODE_ID_KEY = "node_id"; + private static final String NODE_VERSION_KEY = "node_version"; + private static 
final String TYPE_FIELD_NAME = "type"; + private static final String DATA_FIELD_NAME = "data"; + private static final String GLOBAL_TYPE_NAME = "global"; + private static final String INDEX_TYPE_NAME = "index"; + private static final String INDEX_UUID_FIELD_NAME = "index_uuid"; + private static final int COMMIT_DATA_SIZE = 4; + + public static final String METADATA_DIRECTORY_NAME = MetaDataStateFormat.STATE_DIR_NAME; + + public static final Setting SLOW_WRITE_LOGGING_THRESHOLD = Setting.timeSetting("gateway.slow_write_logging_threshold", + TimeValue.timeValueSeconds(10), TimeValue.ZERO, Setting.Property.NodeScope, Setting.Property.Dynamic); + + private final Path[] dataPaths; + private final String nodeId; + private final NamedXContentRegistry namedXContentRegistry; + private final BigArrays bigArrays; + private final boolean preserveUnknownCustoms; + private final LongSupplier relativeTimeMillisSupplier; + + private volatile TimeValue slowWriteLoggingThreshold; + + public PersistedClusterStateService(NodeEnvironment nodeEnvironment, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays, + ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier) { + this(nodeEnvironment.nodeDataPaths(), nodeEnvironment.nodeId(), namedXContentRegistry, bigArrays, clusterSettings, + relativeTimeMillisSupplier, false); + } + + public PersistedClusterStateService(Path[] dataPaths, String nodeId, NamedXContentRegistry namedXContentRegistry, BigArrays bigArrays, + ClusterSettings clusterSettings, LongSupplier relativeTimeMillisSupplier, + boolean preserveUnknownCustoms) { + this.dataPaths = dataPaths; + this.nodeId = nodeId; + this.namedXContentRegistry = namedXContentRegistry; + this.bigArrays = bigArrays; + this.relativeTimeMillisSupplier = relativeTimeMillisSupplier; + this.preserveUnknownCustoms = preserveUnknownCustoms; + this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); + clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); + } + + private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { + this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; + } + + public String getNodeId() { + return nodeId; + } + + /** + * Creates a new disk-based writer for cluster states + */ + public Writer createWriter() throws IOException { + final List metaDataIndexWriters = new ArrayList<>(); + final List closeables = new ArrayList<>(); + boolean success = false; + try { + for (final Path path : dataPaths) { + final Directory directory = createDirectory(path.resolve(METADATA_DIRECTORY_NAME)); + closeables.add(directory); + + final IndexWriter indexWriter = createIndexWriter(directory, false); + closeables.add(indexWriter); + metaDataIndexWriters.add(new MetaDataIndexWriter(directory, indexWriter)); + } + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(closeables); + } + } + return new Writer(metaDataIndexWriters, nodeId, bigArrays, relativeTimeMillisSupplier, () -> slowWriteLoggingThreshold); + } + + private static IndexWriter createIndexWriter(Directory directory, boolean openExisting) throws IOException { + final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new KeywordAnalyzer()); + // start empty since we re-write the whole cluster state to ensure it is all using the same format version + indexWriterConfig.setOpenMode(openExisting ? 
IndexWriterConfig.OpenMode.APPEND : IndexWriterConfig.OpenMode.CREATE); + // only commit when specifically instructed, we must not write any intermediate states + indexWriterConfig.setCommitOnClose(false); + // most of the data goes into stored fields which are not buffered, so we only really need a tiny buffer + indexWriterConfig.setRAMBufferSizeMB(1.0); + // merge on the write thread (e.g. while flushing) + indexWriterConfig.setMergeScheduler(new SerialMergeScheduler()); + + return new IndexWriter(directory, indexWriterConfig); + } + + /** + * Remove all persisted cluster states from the given data paths, for use in tests. Should only be called when there is no open + * {@link Writer} on these paths. + */ + public static void deleteAll(Path[] dataPaths) throws IOException { + for (Path dataPath : dataPaths) { + Lucene.cleanLuceneIndex(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME))); + } + } + + // exposed for tests + Directory createDirectory(Path path) throws IOException { + // it is possible to disable the use of MMapDirectory for indices, and it may be surprising to users that have done so if we still + // use a MMapDirectory here, which might happen with FSDirectory.open(path). Concurrency is of no concern here so a + // SimpleFSDirectory is fine: + return new SimpleFSDirectory(path); + } + + public Path[] getDataPaths() { + return dataPaths; + } + + public static class OnDiskState { + private static final OnDiskState NO_ON_DISK_STATE = new OnDiskState(null, null, 0L, 0L, MetaData.EMPTY_META_DATA); + + private final String nodeId; + private final Path dataPath; + public final long currentTerm; + public final long lastAcceptedVersion; + public final MetaData metaData; + + private OnDiskState(String nodeId, Path dataPath, long currentTerm, long lastAcceptedVersion, MetaData metaData) { + this.nodeId = nodeId; + this.dataPath = dataPath; + this.currentTerm = currentTerm; + this.lastAcceptedVersion = lastAcceptedVersion; + this.metaData = metaData; + } + + public boolean empty() { + return this == NO_ON_DISK_STATE; + } + } + + /** + * Returns the node metadata for the given data paths, and checks if the node ids are unique + * @param dataPaths the data paths to scan + */ + @Nullable + public static NodeMetaData nodeMetaData(Path... 
dataPaths) throws IOException { + String nodeId = null; + Version version = null; + for (final Path dataPath : dataPaths) { + final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); + if (Files.exists(indexPath)) { + try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) { + final Map userData = reader.getIndexCommit().getUserData(); + assert userData.get(NODE_VERSION_KEY) != null; + + final String thisNodeId = userData.get(NODE_ID_KEY); + assert thisNodeId != null; + if (nodeId != null && nodeId.equals(thisNodeId) == false) { + throw new IllegalStateException("unexpected node ID in metadata, found [" + thisNodeId + + "] in [" + dataPath + "] but expected [" + nodeId + "]"); + } else if (nodeId == null) { + nodeId = thisNodeId; + version = Version.fromId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); + } + } catch (IndexNotFoundException e) { + logger.debug(new ParameterizedMessage("no on-disk state at {}", indexPath), e); + } + } + } + if (nodeId == null) { + return null; + } + return new NodeMetaData(nodeId, version); + } + + /** + * Overrides the version field for the metadata in the given data path + */ + public static void overrideVersion(Version newVersion, Path... dataPaths) throws IOException { + for (final Path dataPath : dataPaths) { + final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); + if (Files.exists(indexPath)) { + try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) { + final Map userData = reader.getIndexCommit().getUserData(); + assert userData.get(NODE_VERSION_KEY) != null; + + try (IndexWriter indexWriter = + createIndexWriter(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) { + final Map commitData = new HashMap<>(userData); + commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id)); + indexWriter.setLiveCommitData(commitData.entrySet()); + indexWriter.commit(); + } + } catch (IndexNotFoundException e) { + logger.debug(new ParameterizedMessage("no on-disk state at {}", indexPath), e); + } + } + } + } + + /** + * Loads the best available on-disk cluster state. Returns {@link OnDiskState#NO_ON_DISK_STATE} if no such state was found. + */ + public OnDiskState loadBestOnDiskState() throws IOException { + String committedClusterUuid = null; + Path committedClusterUuidPath = null; + OnDiskState bestOnDiskState = OnDiskState.NO_ON_DISK_STATE; + OnDiskState maxCurrentTermOnDiskState = bestOnDiskState; + + // We use a write-all-read-one strategy: metadata is written to every data path when accepting it, which means it is mostly + // sufficient to read _any_ copy. "Mostly" sufficient because the user can change the set of data paths when restarting, and may + // add a data path containing a stale copy of the metadata. We deal with this by using the freshest copy we can find. 
+ for (final Path dataPath : dataPaths) { + final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); + if (Files.exists(indexPath)) { + try (Directory directory = createDirectory(indexPath); + DirectoryReader directoryReader = DirectoryReader.open(directory)) { + final OnDiskState onDiskState = loadOnDiskState(dataPath, directoryReader); + + if (nodeId.equals(onDiskState.nodeId) == false) { + throw new IllegalStateException("unexpected node ID in metadata, found [" + onDiskState.nodeId + + "] in [" + dataPath + "] but expected [" + nodeId + "]"); + } + + if (onDiskState.metaData.clusterUUIDCommitted()) { + if (committedClusterUuid == null) { + committedClusterUuid = onDiskState.metaData.clusterUUID(); + committedClusterUuidPath = dataPath; + } else if (committedClusterUuid.equals(onDiskState.metaData.clusterUUID()) == false) { + throw new IllegalStateException("mismatched cluster UUIDs in metadata, found [" + committedClusterUuid + + "] in [" + committedClusterUuidPath + "] and [" + onDiskState.metaData.clusterUUID() + "] in [" + + dataPath + "]"); + } + } + + if (maxCurrentTermOnDiskState.empty() || maxCurrentTermOnDiskState.currentTerm < onDiskState.currentTerm) { + maxCurrentTermOnDiskState = onDiskState; + } + + long acceptedTerm = onDiskState.metaData.coordinationMetaData().term(); + long maxAcceptedTerm = bestOnDiskState.metaData.coordinationMetaData().term(); + if (bestOnDiskState.empty() + || acceptedTerm > maxAcceptedTerm + || (acceptedTerm == maxAcceptedTerm + && (onDiskState.lastAcceptedVersion > bestOnDiskState.lastAcceptedVersion + || (onDiskState.lastAcceptedVersion == bestOnDiskState.lastAcceptedVersion) + && onDiskState.currentTerm > bestOnDiskState.currentTerm))) { + bestOnDiskState = onDiskState; + } + } catch (IndexNotFoundException e) { + logger.debug(new ParameterizedMessage("no on-disk state at {}", indexPath), e); + } + } + } + + if (bestOnDiskState.currentTerm != maxCurrentTermOnDiskState.currentTerm) { + throw new IllegalStateException("inconsistent terms found: best state is from [" + bestOnDiskState.dataPath + + "] in term [" + bestOnDiskState.currentTerm + "] but there is a stale state in [" + maxCurrentTermOnDiskState.dataPath + + "] with greater term [" + maxCurrentTermOnDiskState.currentTerm + "]"); + } + + return bestOnDiskState; + } + + private OnDiskState loadOnDiskState(Path dataPath, DirectoryReader reader) throws IOException { + final IndexSearcher searcher = new IndexSearcher(reader); + searcher.setQueryCache(null); + + final SetOnce builderReference = new SetOnce<>(); + consumeFromType(searcher, GLOBAL_TYPE_NAME, bytes -> + { + final MetaData metaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.SMILE) + .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length), + preserveUnknownCustoms); + logger.trace("found global metadata with last-accepted term [{}]", metaData.coordinationMetaData().term()); + if (builderReference.get() != null) { + throw new IllegalStateException("duplicate global metadata found in [" + dataPath + "]"); + } + builderReference.set(MetaData.builder(metaData)); + }); + + final MetaData.Builder builder = builderReference.get(); + if (builder == null) { + throw new IllegalStateException("no global metadata found in [" + dataPath + "]"); + } + + logger.trace("got global metadata, now reading index metadata"); + + final Set indexUUIDs = new HashSet<>(); + consumeFromType(searcher, INDEX_TYPE_NAME, bytes -> + { + final IndexMetaData indexMetaData = 
IndexMetaData.fromXContent(XContentFactory.xContent(XContentType.SMILE) + .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length)); + logger.trace("found index metadata for {}", indexMetaData.getIndex()); + if (indexUUIDs.add(indexMetaData.getIndexUUID()) == false) { + throw new IllegalStateException("duplicate metadata found for " + indexMetaData.getIndex() + " in [" + dataPath + "]"); + } + builder.put(indexMetaData, false); + }); + + final Map userData = reader.getIndexCommit().getUserData(); + logger.trace("loaded metadata [{}] from [{}]", userData, reader.directory()); + assert userData.size() == COMMIT_DATA_SIZE : userData; + assert userData.get(CURRENT_TERM_KEY) != null; + assert userData.get(LAST_ACCEPTED_VERSION_KEY) != null; + assert userData.get(NODE_ID_KEY) != null; + assert userData.get(NODE_VERSION_KEY) != null; + return new OnDiskState(userData.get(NODE_ID_KEY), dataPath, Long.parseLong(userData.get(CURRENT_TERM_KEY)), + Long.parseLong(userData.get(LAST_ACCEPTED_VERSION_KEY)), builder.build()); + } + + private static void consumeFromType(IndexSearcher indexSearcher, String type, + CheckedConsumer bytesRefConsumer) throws IOException { + + final Query query = new TermQuery(new Term(TYPE_FIELD_NAME, type)); + final Weight weight = indexSearcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 0.0f); + logger.trace("running query [{}]", query); + + for (LeafReaderContext leafReaderContext : indexSearcher.getIndexReader().leaves()) { + logger.trace("new leafReaderContext: {}", leafReaderContext); + final Scorer scorer = weight.scorer(leafReaderContext); + if (scorer != null) { + final Bits liveDocs = leafReaderContext.reader().getLiveDocs(); + final IntPredicate isLiveDoc = liveDocs == null ? i -> true : liveDocs::get; + final DocIdSetIterator docIdSetIterator = scorer.iterator(); + while (docIdSetIterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + if (isLiveDoc.test(docIdSetIterator.docID())) { + logger.trace("processing doc {}", docIdSetIterator.docID()); + bytesRefConsumer.accept( + leafReaderContext.reader().document(docIdSetIterator.docID()).getBinaryValue(DATA_FIELD_NAME)); + } + } + } + } + } + + private static final ToXContent.Params FORMAT_PARAMS; + + static { + Map params = new HashMap<>(2); + params.put("binary", "true"); + params.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new ToXContent.MapParams(params); + } + + /** + * A {@link Document} with a stored field containing serialized metadata written to a {@link ReleasableBytesStreamOutput} which must be + * released when no longer needed. + */ + private static class ReleasableDocument implements Releasable { + private final Document document; + private final Releasable releasable; + + ReleasableDocument(Document document, Releasable releasable) { + this.document = document; + this.releasable = releasable; + } + + Document getDocument() { + return document; + } + + @Override + public void close() { + releasable.close(); + } + } + + /** + * Encapsulates a single {@link IndexWriter} with its {@link Directory} for ease of closing, and a {@link Logger}. There is one of these + * for each data path. 
+ */ + private static class MetaDataIndexWriter implements Closeable { + + private final Logger logger; + private final Directory directory; + private final IndexWriter indexWriter; + + MetaDataIndexWriter(Directory directory, IndexWriter indexWriter) { + this.directory = directory; + this.indexWriter = indexWriter; + this.logger = Loggers.getLogger(MetaDataIndexWriter.class, directory.toString()); + } + + void deleteAll() throws IOException { + this.logger.trace("clearing existing metadata"); + this.indexWriter.deleteAll(); + } + + void updateIndexMetaDataDocument(Document indexMetaDataDocument, Index index) throws IOException { + this.logger.trace("updating metadata for [{}]", index); + indexWriter.updateDocument(new Term(INDEX_UUID_FIELD_NAME, index.getUUID()), indexMetaDataDocument); + } + + void updateGlobalMetaData(Document globalMetaDataDocument) throws IOException { + this.logger.trace("updating global metadata doc"); + indexWriter.updateDocument(new Term(TYPE_FIELD_NAME, GLOBAL_TYPE_NAME), globalMetaDataDocument); + } + + void deleteIndexMetaData(String indexUUID) throws IOException { + this.logger.trace("removing metadata for [{}]", indexUUID); + indexWriter.deleteDocuments(new Term(INDEX_UUID_FIELD_NAME, indexUUID)); + } + + void flush() throws IOException { + this.logger.trace("flushing"); + this.indexWriter.flush(); + } + + void prepareCommit(String nodeId, long currentTerm, long lastAcceptedVersion) throws IOException { + final Map commitData = new HashMap<>(COMMIT_DATA_SIZE); + commitData.put(CURRENT_TERM_KEY, Long.toString(currentTerm)); + commitData.put(LAST_ACCEPTED_VERSION_KEY, Long.toString(lastAcceptedVersion)); + commitData.put(NODE_VERSION_KEY, Integer.toString(Version.CURRENT.id)); + commitData.put(NODE_ID_KEY, nodeId); + indexWriter.setLiveCommitData(commitData.entrySet()); + indexWriter.prepareCommit(); + } + + void commit() throws IOException { + indexWriter.commit(); + } + + @Override + public void close() throws IOException { + IOUtils.close(indexWriter, directory); + } + } + + public static class Writer implements Closeable { + + private final List metaDataIndexWriters; + private final String nodeId; + private final BigArrays bigArrays; + private final LongSupplier relativeTimeMillisSupplier; + private final Supplier slowWriteLoggingThresholdSupplier; + + boolean fullStateWritten = false; + private final AtomicBoolean closed = new AtomicBoolean(); + + private Writer(List metaDataIndexWriters, String nodeId, BigArrays bigArrays, + LongSupplier relativeTimeMillisSupplier, Supplier slowWriteLoggingThresholdSupplier) { + this.metaDataIndexWriters = metaDataIndexWriters; + this.nodeId = nodeId; + this.bigArrays = bigArrays; + this.relativeTimeMillisSupplier = relativeTimeMillisSupplier; + this.slowWriteLoggingThresholdSupplier = slowWriteLoggingThresholdSupplier; + } + + private void ensureOpen() { + if (closed.get()) { + throw new AlreadyClosedException("cluster state writer is closed already"); + } + } + + public boolean isOpen() { + return closed.get() == false; + } + + private void closeIfAnyIndexWriterHasTragedyOrIsClosed() { + if (metaDataIndexWriters.stream().map(writer -> writer.indexWriter) + .anyMatch(iw -> iw.getTragicException() != null || iw.isOpen() == false)) { + try { + close(); + } catch (Exception e) { + logger.warn("failed on closing cluster state writer", e); + } + } + } + + /** + * Overrides and commits the given current term and cluster state + */ + public void writeFullStateAndCommit(long currentTerm, ClusterState clusterState) throws 
IOException { + ensureOpen(); + try { + final long startTimeMillis = relativeTimeMillisSupplier.getAsLong(); + final WriterStats stats = overwriteMetaData(clusterState.metaData()); + commit(currentTerm, clusterState.version()); + fullStateWritten = true; + final long durationMillis = relativeTimeMillisSupplier.getAsLong() - startTimeMillis; + final TimeValue finalSlowWriteLoggingThreshold = slowWriteLoggingThresholdSupplier.get(); + if (durationMillis >= finalSlowWriteLoggingThreshold.getMillis()) { + logger.warn("writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote full state with [{}] indices", + durationMillis, finalSlowWriteLoggingThreshold, stats.numIndicesUpdated); + } else { + logger.debug("writing cluster state took [{}ms]; " + + "wrote full state with [{}] indices", + durationMillis, stats.numIndicesUpdated); + } + } finally { + closeIfAnyIndexWriterHasTragedyOrIsClosed(); + } + } + + /** + * Updates and commits the given cluster state update + */ + void writeIncrementalStateAndCommit(long currentTerm, ClusterState previousClusterState, + ClusterState clusterState) throws IOException { + ensureOpen(); + assert fullStateWritten : "Need to write full state first before doing incremental writes"; + try { + final long startTimeMillis = relativeTimeMillisSupplier.getAsLong(); + final WriterStats stats = updateMetaData(previousClusterState.metaData(), clusterState.metaData()); + commit(currentTerm, clusterState.version()); + final long durationMillis = relativeTimeMillisSupplier.getAsLong() - startTimeMillis; + final TimeValue finalSlowWriteLoggingThreshold = slowWriteLoggingThresholdSupplier.get(); + if (durationMillis >= finalSlowWriteLoggingThreshold.getMillis()) { + logger.warn("writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote global metadata [{}] and metadata for [{}] indices and skipped [{}] unchanged indices", + durationMillis, finalSlowWriteLoggingThreshold, stats.globalMetaUpdated, stats.numIndicesUpdated, + stats.numIndicesUnchanged); + } else { + logger.debug("writing cluster state took [{}ms]; " + + "wrote global metadata [{}] and metadata for [{}] indices and skipped [{}] unchanged indices", + durationMillis, stats.globalMetaUpdated, stats.numIndicesUpdated, stats.numIndicesUnchanged); + } + } finally { + closeIfAnyIndexWriterHasTragedyOrIsClosed(); + } + } + + /** + * Update the persisted metadata to match the given cluster state by removing any stale or unnecessary documents and adding any + * updated documents. 
+ */ + private WriterStats updateMetaData(MetaData previouslyWrittenMetaData, MetaData metaData) throws IOException { + assert previouslyWrittenMetaData.coordinationMetaData().term() == metaData.coordinationMetaData().term(); + logger.trace("currentTerm [{}] matches previous currentTerm, writing changes only", + metaData.coordinationMetaData().term()); + + final boolean updateGlobalMeta = MetaData.isGlobalStateEquals(previouslyWrittenMetaData, metaData) == false; + if (updateGlobalMeta) { + try (ReleasableDocument globalMetaDataDocument = makeGlobalMetaDataDocument(metaData)) { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.updateGlobalMetaData(globalMetaDataDocument.getDocument()); + } + } + } + + final Map indexMetaDataVersionByUUID = new HashMap<>(previouslyWrittenMetaData.indices().size()); + for (ObjectCursor cursor : previouslyWrittenMetaData.indices().values()) { + final IndexMetaData indexMetaData = cursor.value; + final Long previousValue = indexMetaDataVersionByUUID.putIfAbsent(indexMetaData.getIndexUUID(), indexMetaData.getVersion()); + assert previousValue == null : indexMetaData.getIndexUUID() + " already mapped to " + previousValue; + } + + int numIndicesUpdated = 0; + int numIndicesUnchanged = 0; + for (ObjectCursor cursor : metaData.indices().values()) { + final IndexMetaData indexMetaData = cursor.value; + final Long previousVersion = indexMetaDataVersionByUUID.get(indexMetaData.getIndexUUID()); + if (previousVersion == null || indexMetaData.getVersion() != previousVersion) { + logger.trace("updating metadata for [{}], changing version from [{}] to [{}]", + indexMetaData.getIndex(), previousVersion, indexMetaData.getVersion()); + numIndicesUpdated++; + try (ReleasableDocument indexMetaDataDocument = makeIndexMetaDataDocument(indexMetaData)) { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.updateIndexMetaDataDocument(indexMetaDataDocument.getDocument(), indexMetaData.getIndex()); + } + } + } else { + numIndicesUnchanged++; + logger.trace("no action required for [{}]", indexMetaData.getIndex()); + } + indexMetaDataVersionByUUID.remove(indexMetaData.getIndexUUID()); + } + + for (String removedIndexUUID : indexMetaDataVersionByUUID.keySet()) { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.deleteIndexMetaData(removedIndexUUID); + } + } + + // Flush, to try and expose a failure (e.g. out of disk space) before committing, because we can handle a failure here more + // gracefully than one that occurs during the commit process. + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.flush(); + } + + return new WriterStats(updateGlobalMeta, numIndicesUpdated, numIndicesUnchanged); + } + + /** + * Update the persisted metadata to match the given cluster state by removing all existing documents and then adding new documents. + */ + private WriterStats overwriteMetaData(MetaData metaData) throws IOException { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.deleteAll(); + } + return addMetaData(metaData); + } + + /** + * Add documents for the metadata of the given cluster state, assuming that there are currently no documents. 
+ */ + private WriterStats addMetaData(MetaData metaData) throws IOException { + try (ReleasableDocument globalMetaDataDocument = makeGlobalMetaDataDocument(metaData)) { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.updateGlobalMetaData(globalMetaDataDocument.getDocument()); + } + } + + for (ObjectCursor cursor : metaData.indices().values()) { + final IndexMetaData indexMetaData = cursor.value; + try (ReleasableDocument indexMetaDataDocument = makeIndexMetaDataDocument(indexMetaData)) { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.updateIndexMetaDataDocument(indexMetaDataDocument.getDocument(), indexMetaData.getIndex()); + } + } + } + + // Flush, to try and expose a failure (e.g. out of disk space) before committing, because we can handle a failure here more + // gracefully than one that occurs during the commit process. + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.flush(); + } + + return new WriterStats(true, metaData.indices().size(), 0); + } + + public void commit(long currentTerm, long lastAcceptedVersion) throws IOException { + ensureOpen(); + try { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.prepareCommit(nodeId, currentTerm, lastAcceptedVersion); + } + } catch (Exception e) { + try { + close(); + } catch (Exception e2) { + logger.warn("failed on closing cluster state writer", e2); + e.addSuppressed(e2); + } + throw e; + } finally { + closeIfAnyIndexWriterHasTragedyOrIsClosed(); + } + try { + for (MetaDataIndexWriter metaDataIndexWriter : metaDataIndexWriters) { + metaDataIndexWriter.commit(); + } + } catch (IOException e) { + // The commit() call has similar semantics to a fsync(): although it's atomic, if it fails then we've no idea whether the + // data on disk is now the old version or the new version, and this is a disaster. It's safest to fail the whole node and + // retry from the beginning. 
+ try { + close(); + } catch (Exception e2) { + e.addSuppressed(e2); + } + throw new IOError(e); + } finally { + closeIfAnyIndexWriterHasTragedyOrIsClosed(); + } + } + + @Override + public void close() throws IOException { + logger.trace("closing PersistedClusterStateService.Writer"); + if (closed.compareAndSet(false, true)) { + IOUtils.close(metaDataIndexWriters); + } + } + + static class WriterStats { + final boolean globalMetaUpdated; + final long numIndicesUpdated; + final long numIndicesUnchanged; + + WriterStats(boolean globalMetaUpdated, long numIndicesUpdated, long numIndicesUnchanged) { + this.globalMetaUpdated = globalMetaUpdated; + this.numIndicesUpdated = numIndicesUpdated; + this.numIndicesUnchanged = numIndicesUnchanged; + } + } + + private ReleasableDocument makeIndexMetaDataDocument(IndexMetaData indexMetaData) throws IOException { + final ReleasableDocument indexMetaDataDocument = makeDocument(INDEX_TYPE_NAME, indexMetaData); + boolean success = false; + try { + final String indexUUID = indexMetaData.getIndexUUID(); + assert indexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE) == false; + indexMetaDataDocument.getDocument().add(new StringField(INDEX_UUID_FIELD_NAME, indexUUID, Field.Store.NO)); + success = true; + return indexMetaDataDocument; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(indexMetaDataDocument); + } + } + } + + private ReleasableDocument makeGlobalMetaDataDocument(MetaData metaData) throws IOException { + return makeDocument(GLOBAL_TYPE_NAME, metaData); + } + + private ReleasableDocument makeDocument(String typeName, ToXContent metaData) throws IOException { + final Document document = new Document(); + document.add(new StringField(TYPE_FIELD_NAME, typeName, Field.Store.NO)); + + boolean success = false; + final ReleasableBytesStreamOutput releasableBytesStreamOutput = new ReleasableBytesStreamOutput(bigArrays); + try { + final FilterOutputStream outputStream = new FilterOutputStream(releasableBytesStreamOutput) { + @Override + public void close() { + // closing the XContentBuilder should not release the bytes yet + } + }; + try (XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.SMILE, outputStream)) { + xContentBuilder.startObject(); + metaData.toXContent(xContentBuilder, FORMAT_PARAMS); + xContentBuilder.endObject(); + } + document.add(new StoredField(DATA_FIELD_NAME, releasableBytesStreamOutput.bytes().toBytesRef())); + final ReleasableDocument releasableDocument = new ReleasableDocument(document, releasableBytesStreamOutput); + success = true; + return releasableDocument; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(releasableBytesStreamOutput); + } + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 15fd0de2760a6..d7cf228e526a7 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -48,6 +48,8 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; @@ -90,6 +92,7 @@ import java.util.Map; import 
java.util.Objects; import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; @@ -324,6 +327,29 @@ public synchronized void close(final String reason, boolean delete) throws IOExc } } + // method is synchronized so that IndexService can't be closed while we're writing out dangling indices information + public synchronized void writeDanglingIndicesInfo() { + if (closed.get()) { + return; + } + try { + IndexMetaData.FORMAT.writeAndCleanup(getMetaData(), nodeEnv.indexPaths(index())); + } catch (WriteStateException e) { + logger.warn(() -> new ParameterizedMessage("failed to write dangling indices state for index {}", index()), e); + } + } + + // method is synchronized so that IndexService can't be closed while we're deleting dangling indices information + public synchronized void deleteDanglingIndicesInfo() { + if (closed.get()) { + return; + } + try { + MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index())); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("failed to delete dangling indices state for index {}", index()), e); + } + } public String indexUUID() { return indexSettings.getUUID(); @@ -669,24 +695,30 @@ public IndexMetaData getMetaData() { return indexSettings.getIndexMetaData(); } + private final CopyOnWriteArrayList> metaDataListeners = new CopyOnWriteArrayList<>(); + + public void addMetaDataListener(Consumer listener) { + metaDataListeners.add(listener); + } + @Override public synchronized void updateMetaData(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) { - final boolean updateIndexMetaData = indexSettings.updateIndexMetaData(newIndexMetaData); + final boolean updateIndexSettings = indexSettings.updateIndexMetaData(newIndexMetaData); if (Assertions.ENABLED && currentIndexMetaData != null) { final long currentSettingsVersion = currentIndexMetaData.getSettingsVersion(); final long newSettingsVersion = newIndexMetaData.getSettingsVersion(); if (currentSettingsVersion == newSettingsVersion) { - assert updateIndexMetaData == false; + assert updateIndexSettings == false; } else { - assert updateIndexMetaData; + assert updateIndexSettings; assert currentSettingsVersion < newSettingsVersion : "expected current settings version [" + currentSettingsVersion + "] " + "to be less than new settings version [" + newSettingsVersion + "]"; } } - if (updateIndexMetaData) { + if (updateIndexSettings) { for (final IndexShard shard : this.shards.values()) { try { shard.onSettingsChanged(); @@ -722,6 +754,8 @@ public boolean isForceExecution() { } updateFsyncTaskIfNecessary(); } + + metaDataListeners.forEach(c -> c.accept(newIndexMetaData)); } private void updateFsyncTaskIfNecessary() { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 882dcf6bb6cd9..31b343f300880 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.elasticsearch.ElasticsearchException; @@ -536,6 +537,10 @@ 
public IndexAnalyzers build(IndexSettings indexSettings, tokenFilterFactoryFactories, charFilterFactoryFactories); } + for (Analyzer analyzer : normalizers.values()) { + analyzer.normalize("", ""); // check for deprecations + } + if (!analyzers.containsKey(DEFAULT_ANALYZER_NAME)) { analyzers.put(DEFAULT_ANALYZER_NAME, produceAnalyzer(DEFAULT_ANALYZER_NAME, @@ -599,6 +604,7 @@ private static NamedAnalyzer produceAnalyzer(String name, } else { analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap); } + checkVersions(analyzer); return analyzer; } @@ -626,4 +632,20 @@ private void processNormalizerFactory( NamedAnalyzer normalizer = new NamedAnalyzer(name, normalizerFactory.scope(), normalizerF); normalizers.put(name, normalizer); } + + // Some analysis components emit deprecation warnings or throw exceptions when used + // with the wrong version of elasticsearch. These exceptions and warnings are + // normally thrown when tokenstreams are constructed, which unless we build a + // tokenstream up-front does not happen until a document is indexed. In order to + // surface these warnings or exceptions as early as possible, we build an empty + // tokenstream and pull it through an Analyzer at construction time. + private static void checkVersions(Analyzer analyzer) { + try (TokenStream ts = analyzer.tokenStream("", "")) { + ts.reset(); + while (ts.incrementToken()) {} + ts.end(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java index f8e14ef9af52b..fc28f0bd34d2d 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java @@ -52,25 +52,6 @@ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterF (tokenStream, version) -> create.apply(tokenStream)); } - /** - * Create a pre-configured token filter that may vary based on the Elasticsearch version. - */ - public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, - BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ONE, - (tokenStream, version) -> create.apply(tokenStream, version)); - } - - /** - * Create a pre-configured token filter that may vary based on the Elasticsearch version. - */ - public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, - boolean useFilterForParsingSynonyms, - BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, CachingStrategy.ONE, - (tokenStream, version) -> create.apply(tokenStream, version)); - } - /** * Create a pre-configured token filter that may vary based on the Lucene version. */ @@ -88,6 +69,16 @@ public static PreConfiguredTokenFilter elasticsearchVersion(String name, boolean return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ELASTICSEARCH, create); } + /** + * Create a pre-configured token filter that may vary based on the Elasticsearch version. 
+ */ + public static PreConfiguredTokenFilter elasticsearchVersion(String name, boolean useFilterForMultitermQueries, + boolean useFilterForParsingSynonyms, + BiFunction create) { + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, + CachingStrategy.ELASTICSEARCH, create); + } + private final boolean useFilterForMultitermQueries; private final boolean allowForSynonymParsing; private final BiFunction create; diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 22587cf6aad79..517e2966e41c2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -88,13 +88,6 @@ public Engine.CommitId getRawCommitId() { return new Engine.CommitId(Base64.getDecoder().decode(id)); } - /** - * The synced-flush id of the commit if existed. - */ - public String syncId() { - return userData.get(InternalEngine.SYNC_COMMIT_ID); - } - /** * Returns the number of documents in the in this commit */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index d4f16bcaf8458..1307e74f9f2f5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -74,25 +74,22 @@ public DocumentMapper parse(@Nullable String type, CompressedXContent source) th if (source != null) { Map root = XContentHelper.convertToMap(source.compressedReference(), true, XContentType.JSON).v2(); Tuple> t = extractMapping(type, root); - type = t.v1(); mapping = t.v2(); } if (mapping == null) { mapping = new HashMap<>(); } - return parse(type, mapping); + return parse(mapping); } @SuppressWarnings({"unchecked"}) - private DocumentMapper parse(String type, Map mapping) throws MapperParsingException { - if (type == null) { - throw new MapperParsingException("Failed to derive type"); - } + private DocumentMapper parse(Map mapping) throws MapperParsingException { Mapper.TypeParser.ParserContext parserContext = parserContext(); // parse RootObjectMapper DocumentMapper.Builder docBuilder = new DocumentMapper.Builder( - (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService); + (RootObjectMapper.Builder) rootObjectTypeParser.parse(MapperService.SINGLE_MAPPING_NAME, mapping, parserContext), + mapperService); Iterator> iterator = mapping.entrySet().iterator(); // parse DocumentMapper while(iterator.hasNext()) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 5415a433d8670..549b3b7a21dc3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1035,12 +1035,6 @@ public CompletionStats completionStats(String... fields) { } } - public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) { - verifyNotClosed(); - logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId); - return getEngine().syncFlush(syncId, expectedCommitId); - } - /** * Executes the given flush request against the engine. 
* diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index b7a178972fa72..4ed1e9d63b174 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -21,7 +21,6 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; @@ -33,9 +32,9 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; @@ -49,35 +48,30 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetaData; -import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TruncateTranslogAction; -import org.elasticsearch.indices.IndicesModule; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.io.PrintWriter; -import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Objects; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import java.util.stream.StreamSupport; -public class RemoveCorruptedShardDataCommand extends EnvironmentAwareCommand { +public class RemoveCorruptedShardDataCommand extends ElasticsearchNodeCommand { private static final Logger logger = LogManager.getLogger(RemoveCorruptedShardDataCommand.class); @@ -88,7 +82,6 @@ public class RemoveCorruptedShardDataCommand extends EnvironmentAwareCommand { private final RemoveCorruptedLuceneSegmentsAction removeCorruptedLuceneSegmentsAction; private final TruncateTranslogAction truncateTranslogAction; - private final NamedXContentRegistry namedXContentRegistry; public RemoveCorruptedShardDataCommand() { super("Removes corrupted shard files"); @@ -106,11 +99,6 @@ public RemoveCorruptedShardDataCommand() { parser.accepts(TRUNCATE_CLEAN_TRANSLOG_FLAG, "Truncate the translog even if it is not corrupt"); - namedXContentRegistry = new NamedXContentRegistry( - Stream.of(ClusterModule.getNamedXWriteables().stream(), IndicesModule.getNamedXContents().stream()) - 
.flatMap(Function.identity()) - .collect(Collectors.toList())); - removeCorruptedLuceneSegmentsAction = new RemoveCorruptedLuceneSegmentsAction(); truncateTranslogAction = new TruncateTranslogAction(namedXContentRegistry); } @@ -130,11 +118,12 @@ protected Path getPath(String dirValue) { return PathUtils.get(dirValue, "", ""); } - protected void findAndProcessShardPath(OptionSet options, Environment environment, CheckedConsumer consumer) + protected void findAndProcessShardPath(OptionSet options, Environment environment, Path[] dataPaths, ClusterState clusterState, + CheckedConsumer consumer) throws IOException { final Settings settings = environment.settings(); - final String indexName; + final IndexMetaData indexMetaData; final int shardId; if (options.has(folderOption)) { @@ -146,65 +135,48 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen throw new ElasticsearchException("index directory [" + indexPath + "], must exist and be a directory"); } - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardParent); - final String shardIdFileName = path.getFileName().toString(); + final String indexUUIDFolderName = shardParent.getFileName().toString(); if (Files.isDirectory(path) && shardIdFileName.chars().allMatch(Character::isDigit) // SHARD-ID path element check && NodeEnvironment.INDICES_FOLDER.equals(shardParentParent.getFileName().toString()) // `indices` check ) { shardId = Integer.parseInt(shardIdFileName); - indexName = indexMetaData.getIndex().getName(); + indexMetaData = StreamSupport.stream(clusterState.metaData().indices().values().spliterator(), false) + .map(imd -> imd.value) + .filter(imd -> imd.getIndexUUID().equals(indexUUIDFolderName)).findFirst() + .orElse(null); } else { throw new ElasticsearchException("Unable to resolve shard id. 
Wrong folder structure at [ " + path.toString() + " ], expected .../indices/[INDEX-UUID]/[SHARD-ID]"); } } else { // otherwise resolve shardPath based on the index name and shard id - indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required"); + String indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required"); shardId = Objects.requireNonNull(shardIdOption.value(options), "Shard ID is required"); + indexMetaData = clusterState.metaData().index(indexName); } - try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) { - final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths(); - for (NodeEnvironment.NodePath nodePath : nodePaths) { - if (Files.exists(nodePath.indicesPath)) { - // have to scan all index uuid folders to resolve from index name - try (DirectoryStream stream = Files.newDirectoryStream(nodePath.indicesPath)) { - for (Path file : stream) { - if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { - continue; - } - - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file); - if (indexMetaData == null) { - continue; - } - final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); - final Index index = indexMetaData.getIndex(); - if (indexName.equals(index.getName()) == false) { - continue; - } - final ShardId shId = new ShardId(index, shardId); - - final Path shardPathLocation = nodePath.resolve(shId); - if (Files.exists(shardPathLocation) == false) { - continue; - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings.customDataPath(), - new Path[]{shardPathLocation}, nodePath.path); - if (shardPath != null) { - consumer.accept(shardPath); - return; - } - } - } + if (indexMetaData == null) { + throw new ElasticsearchException("Unable to find index in cluster state"); + } + + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + final Index index = indexMetaData.getIndex(); + final ShardId shId = new ShardId(index, shardId); + + for (Path dataPath : dataPaths) { + final Path shardPathLocation = dataPath + .resolve(NodeEnvironment.INDICES_FOLDER) + .resolve(index.getUUID()) + .resolve(Integer.toString(shId.id())); + if (Files.exists(shardPathLocation)) { + final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings.customDataPath(), + new Path[]{shardPathLocation}, dataPath); + if (shardPath != null) { + consumer.accept(shardPath); + return; } } - } catch (LockObtainFailedException lofe) { - throw new ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage() - + "], is Elasticsearch still running ?"); } } @@ -256,11 +228,9 @@ private static void confirm(String msg, Terminal terminal) { } } - private void warnAboutESShouldBeStopped(Terminal terminal) { + private void warnAboutIndexBackup(Terminal terminal) { terminal.println("-----------------------------------------------------------------------"); terminal.println(""); - terminal.println(" WARNING: Elasticsearch MUST be stopped before running this tool."); - terminal.println(""); terminal.println(" Please make a complete backup of your index before using this tool."); terminal.println(""); terminal.println("-----------------------------------------------------------------------"); @@ -268,10 +238,13 @@ private void warnAboutESShouldBeStopped(Terminal terminal) { // Visible for testing @Override - public void 
execute(Terminal terminal, OptionSet options, Environment environment) throws Exception { - warnAboutESShouldBeStopped(terminal); + public void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment environment) throws IOException { + warnAboutIndexBackup(terminal); - findAndProcessShardPath(options, environment, shardPath -> { + final ClusterState clusterState = + loadTermAndClusterState(createPersistedClusterStateService(environment.settings(), dataPaths), environment).v2(); + + findAndProcessShardPath(options, environment, dataPaths, clusterState, shardPath -> { final Path indexPath = shardPath.resolveIndex(); final Path translogPath = shardPath.resolveTranslog(); if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) { @@ -320,7 +293,7 @@ public void write(int b) { terminal.println("Opening translog at " + translogPath); terminal.println(""); try { - translogCleanStatus = truncateTranslogAction.getCleanStatus(shardPath, indexDir); + translogCleanStatus = truncateTranslogAction.getCleanStatus(shardPath, clusterState, indexDir); } catch (Exception e) { terminal.println(e.getMessage()); throw e; @@ -464,21 +437,17 @@ private void newAllocationId(ShardPath shardPath, Terminal terminal) throws IOEx printRerouteCommand(shardPath, terminal, true); } - private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean allocateStale) throws IOException { - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, - shardPath.getDataPath().getParent()); - + private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean allocateStale) + throws IOException { final Path nodePath = getNodePath(shardPath); - final NodeMetaData nodeMetaData = - NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePath); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePath); if (nodeMetaData == null) { throw new ElasticsearchException("No node meta data at " + nodePath); } final String nodeId = nodeMetaData.nodeId(); - final String index = indexMetaData.getIndex().getName(); + final String index = shardPath.getShardId().getIndexName(); final int id = shardPath.getShardId().id(); final AllocationCommands commands = new AllocationCommands( allocateStale @@ -494,7 +463,8 @@ private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean private Path getNodePath(ShardPath shardPath) { final Path nodePath = shardPath.getDataPath().getParent().getParent().getParent(); - if (Files.exists(nodePath) == false || Files.exists(nodePath.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { + if (Files.exists(nodePath) == false || + Files.exists(nodePath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)) == false) { throw new ElasticsearchException("Unable to resolve node path for " + shardPath); } return nodePath; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index e6581d0359d11..9480ee3c1e1f3 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -26,6 +26,7 @@ import org.apache.lucene.store.Directory; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; @@ -63,6 +64,7 @@ public TruncateTranslogAction(NamedXContentRegistry namedXContentRegistry) { } public Tuple getCleanStatus(ShardPath shardPath, + ClusterState clusterState, Directory indexDirectory) throws IOException { final Path indexPath = shardPath.resolveIndex(); final Path translogPath = shardPath.resolveTranslog(); @@ -83,7 +85,7 @@ public Tuple getCleanStatus throw new ElasticsearchException("shard must have a valid translog UUID but got: [null]"); } - final boolean clean = isTranslogClean(shardPath, translogUUID); + final boolean clean = isTranslogClean(shardPath, clusterState, translogUUID); if (clean) { return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null); @@ -166,13 +168,12 @@ public void execute(Terminal terminal, ShardPath shardPath, Directory indexDirec IOUtils.fsync(translogPath, true); } - private boolean isTranslogClean(ShardPath shardPath, String translogUUID) throws IOException { + private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, String translogUUID) throws IOException { // perform clean check of translog instead of corrupted marker file try { final Path translogPath = shardPath.resolveTranslog(); final long translogGlobalCheckpoint = Translog.readGlobalCheckpoint(translogPath, translogUUID); - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardPath.getDataPath().getParent()); + final IndexMetaData indexMetaData = clusterState.metaData().getIndexSafe(shardPath.getShardId().getIndex()); final IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); final TranslogConfig translogConfig = new TranslogConfig(shardPath.getShardId(), translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 7584fda21c329..1214103dd69c0 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -61,7 +61,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.plugins.MapperPlugin; @@ -238,7 +237,6 @@ private static Function> and(Function INDICES_ID_FIELD_DATA_ENABLED_SETTING = Setting.boolSetting("indices.id_field_data.enabled", false, Property.Dynamic, Property.NodeScope); + public static final Setting WRITE_DANGLING_INDICES_INFO_SETTING = Setting.boolSetting( + "gateway.write_dangling_indices_info", + true, + Setting.Property.NodeScope + ); /** * The node's settings. 
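// A hedged usage sketch for the new node setting above (Settings.builder() is the standard settings builder;
// everything else is taken from the hunk above):
//
//     Settings nodeSettings = Settings.builder()
//         .put(WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false)   // gateway.write_dangling_indices_info: false
//         .build();
//
// Since the setting is registered with NodeScope only and not Dynamic, it is read once from the node's settings
// at startup and cannot be updated on a running node.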
@@ -209,6 +221,12 @@ public class IndicesService extends AbstractLifecycleComponent private final CountDownLatch closeLatch = new CountDownLatch(1); private volatile boolean idFieldDataEnabled; + @Nullable + private final EsThreadPoolExecutor danglingIndicesThreadPoolExecutor; + private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); + private final boolean nodeWriteDanglingIndicesInfo; + + @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically @@ -289,12 +307,25 @@ protected void closeInternal() { } } }; + + final String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); + nodeWriteDanglingIndicesInfo = WRITE_DANGLING_INDICES_INFO_SETTING.get(settings); + danglingIndicesThreadPoolExecutor = nodeWriteDanglingIndicesInfo ? EsExecutors.newScaling( + nodeName + "/" + DANGLING_INDICES_UPDATE_THREAD_NAME, + 1, 1, + 0, TimeUnit.MILLISECONDS, + daemonThreadFactory(nodeName, DANGLING_INDICES_UPDATE_THREAD_NAME), + threadPool.getThreadContext()) : null; } + private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask"; + @Override protected void doStop() { + ThreadPool.terminate(danglingIndicesThreadPoolExecutor, 10, TimeUnit.SECONDS); + ExecutorService indicesStopExecutor = - Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory(settings, "indices_shutdown")); + Executors.newFixedThreadPool(5, daemonThreadFactory(settings, "indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); @@ -455,6 +486,7 @@ public boolean hasIndex(Index index) { public IndexService indexService(Index index) { return indices.get(index.getUUID()); } + /** * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. 
*/ @@ -478,7 +510,8 @@ public IndexService indexServiceSafe(Index index) { */ @Override public synchronized IndexService createIndex( - final IndexMetaData indexMetaData, final List builtInListeners) throws IOException { + final IndexMetaData indexMetaData, final List builtInListeners, + final boolean writeDanglingIndices) throws IOException { ensureChangesAllowed(); if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); @@ -514,8 +547,18 @@ public void onStoreClosed(ShardId shardId) { indexingMemoryController); boolean success = false; try { + if (writeDanglingIndices && nodeWriteDanglingIndicesInfo) { + indexService.addMetaDataListener(imd -> updateDanglingIndicesInfo(index)); + } indexService.getIndexEventListener().afterIndexCreated(indexService); indices = Maps.copyMapWithAddedEntry(indices, index.getUUID(), indexService); + if (writeDanglingIndices) { + if (nodeWriteDanglingIndicesInfo) { + updateDanglingIndicesInfo(index); + } else { + indexService.deleteDanglingIndicesInfo(); + } + } success = true; return indexService; } finally { @@ -761,7 +804,7 @@ public void deleteUnassignedIndex(String reason, IndexMetaData metaData, Cluster throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of " + "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } - deleteIndexStore(reason, metaData, clusterState); + deleteIndexStore(reason, metaData); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])", metaData.getIndex(), reason), e); @@ -775,7 +818,7 @@ public void deleteUnassignedIndex(String reason, IndexMetaData metaData, Cluster * * Package private for testing */ - void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState) throws IOException { + void deleteIndexStore(String reason, IndexMetaData metaData) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { Index index = metaData.getIndex(); @@ -784,15 +827,6 @@ void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState cluste throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } - - if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().getLocalNode().isMasterNode() == true)) { - // we do not delete the store if it is a master eligible node and the index is still in the cluster state - // because we want to keep the meta data for indices around even if no shards are left here - final IndexMetaData idxMeta = clusterState.metaData().index(index.getName()); - throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the " + - "cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "], " + - "we are master eligible, so will keep the index metadata even if no shards are left."); - } } final IndexSettings indexSettings = buildIndexSettings(metaData); deleteIndexStore(reason, indexSettings.getIndex(), indexSettings); @@ -870,13 +904,11 @@ public void deleteShardStore(String reason, ShardId shardId, ClusterState cluste nodeEnv.deleteShardDirectorySafe(shardId, indexSettings); logger.debug("{} deleted shard reason [{}]", shardId, reason); - // master nodes keep the 
index meta data, even if having no shards.. - if (clusterState.nodes().getLocalNode().isMasterNode() == false && - canDeleteIndexContents(shardId.getIndex(), indexSettings)) { + if (canDeleteIndexContents(shardId.getIndex(), indexSettings)) { if (nodeEnv.findAllShardIds(shardId.getIndex()).isEmpty()) { try { // note that deleteIndexStore have more safety checks and may throw an exception if index was concurrently created. - deleteIndexStore("no longer used", metaData, clusterState); + deleteIndexStore("no longer used", metaData); } catch (Exception e) { // wrap the exception to indicate we already deleted the shard throw new ElasticsearchException("failed to delete unused index after deleting its last shard (" + shardId + ")", e); @@ -1498,4 +1530,51 @@ public static Optional checkShardLimit(int newShards, ClusterState state } return Optional.empty(); } + + private void updateDanglingIndicesInfo(Index index) { + assert DiscoveryNode.isDataNode(settings) : "dangling indices information should only be persisted on data nodes"; + assert nodeWriteDanglingIndicesInfo : "writing dangling indices info is not enabled"; + assert danglingIndicesThreadPoolExecutor != null : "executor for dangling indices info is not available"; + if (danglingIndicesToWrite.add(index)) { + logger.trace("triggered dangling indices update for {}", index); + final long triggeredTimeMillis = threadPool.relativeTimeInMillis(); + try { + danglingIndicesThreadPoolExecutor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("failed to write dangling indices state for index {}", index), e); + } + + @Override + protected void doRun() { + final boolean exists = danglingIndicesToWrite.remove(index); + assert exists : "removed non-existing item for " + index; + final IndexService indexService = indices.get(index.getUUID()); + if (indexService != null) { + final long executedTimeMillis = threadPool.relativeTimeInMillis(); + logger.trace("writing out dangling indices state for index {}, triggered {} ago", index, + TimeValue.timeValueMillis(Math.min(0L, executedTimeMillis - triggeredTimeMillis))); + indexService.writeDanglingIndicesInfo(); + final long completedTimeMillis = threadPool.relativeTimeInMillis(); + logger.trace("writing out of dangling indices state for index {} completed after {}", index, + TimeValue.timeValueMillis(Math.min(0L, completedTimeMillis - executedTimeMillis))); + } else { + logger.trace("omit writing dangling indices state for index {} as index is deallocated on this node", index); + } + } + }); + } catch (EsRejectedExecutionException e) { + // ignore cases where we are shutting down..., there is really nothing interesting to be done here... 
+ assert danglingIndicesThreadPoolExecutor.isShutdown(); + } + } else { + logger.trace("dangling indices update already pending for {}", index); + } + } + + // visible for testing + public boolean allPendingDanglingIndicesWritten() { + return nodeWriteDanglingIndicesInfo == false || + (danglingIndicesToWrite.isEmpty() && danglingIndicesThreadPoolExecutor.getActiveCount() == 0); + } } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 60a2b1640ed5b..2b856579ff15b 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -181,7 +181,7 @@ static Map setupPreConfiguredTokenFilters(List preConfiguredTokenFilters.register("lowercase", PreConfiguredTokenFilter.singleton("lowercase", true, LowerCaseFilter::new)); // Add "standard" for old indices (bwc) preConfiguredTokenFilters.register( "standard", - PreConfiguredTokenFilter.singletonWithVersion("standard", true, (reader, version) -> { + PreConfiguredTokenFilter.elasticsearchVersion("standard", true, (reader, version) -> { if (version.before(Version.V_7_0_0)) { deprecationLogger.deprecatedAndMaybeLog("standard_deprecation", "The [standard] token filter is deprecated and will be removed in a future version."); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java index 90a814b29b3c5..c69b6b740aebd 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java @@ -19,8 +19,6 @@ package org.elasticsearch.indices.breaker; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -29,7 +27,6 @@ * that load field data. 
*/ public abstract class CircuitBreakerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); protected CircuitBreakerService() { } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 23c8c26230c87..754635b121249 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -68,7 +68,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.recovery.PeerRecoverySourceService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFailedException; @@ -135,7 +134,6 @@ public IndicesClusterStateService( final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, @@ -151,7 +149,6 @@ public IndicesClusterStateService( nodeMappingRefreshAction, repositoriesService, searchService, - syncedFlushService, peerRecoverySourceService, snapshotShardsService, primaryReplicaSyncer, @@ -170,7 +167,6 @@ public IndicesClusterStateService( final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, @@ -499,7 +495,7 @@ private void createIndices(final ClusterState state) { AllocatedIndex indexService = null; try { - indexService = indicesService.createIndex(indexMetaData, buildInIndexListener); + indexService = indicesService.createIndex(indexMetaData, buildInIndexListener, true); if (indexService.updateMapping(null, indexMetaData) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(), @@ -863,10 +859,12 @@ public interface AllocatedIndices> * @param indexMetaData the index metadata to create the index for * @param builtInIndexListener a list of built-in lifecycle {@link IndexEventListener} that should should be used along side with * the per-index listeners + * @param writeDanglingIndices whether dangling indices information should be written * @throws ResourceAlreadyExistsException if the index already exists. */ U createIndex(IndexMetaData indexMetaData, - List builtInIndexListener) throws IOException; + List builtInIndexListener, + boolean writeDanglingIndices) throws IOException; /** * Verify that the contents on disk for the given index is deleted; if not, delete the contents. 
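(Illustrative sketch, not part of the patch: the interface change above threads a new writeDanglingIndices flag through AllocatedIndices#createIndex, and IndicesClusterStateService now passes true when applying cluster state. A minimal caller-side sketch under those assumptions; the helper name and the empty listener list are placeholders, not taken from the change.)

import java.io.IOException;
import java.util.Collections;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.IndicesService;

class CreateIndexCallSketch {
    // The third argument decides whether on-disk dangling-indices information is maintained
    // for the new index; cluster-state-driven creation (as above) opts in with true.
    static IndexService createWithDanglingInfo(IndicesService indicesService, IndexMetaData indexMetaData) throws IOException {
        return indicesService.createIndex(indexMetaData, Collections.emptyList(), true);
    }
}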
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java deleted file mode 100644 index 4748c41d4b3e7..0000000000000 --- a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.indices.flush; - -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.emptyMap; - -/** - * Result for all copies of a shard - */ -public class ShardsSyncedFlushResult implements Writeable { - private String failureReason; - private Map shardResponses; - private String syncId; - private ShardId shardId; - // some shards may be unassigned, so we need this as state - private int totalShards; - - public ShardsSyncedFlushResult(StreamInput in) throws IOException { - failureReason = in.readOptionalString(); - int numResponses = in.readInt(); - shardResponses = new HashMap<>(); - for (int i = 0; i < numResponses; i++) { - ShardRouting shardRouting = new ShardRouting(in); - SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); - shardResponses.put(shardRouting, response); - } - syncId = in.readOptionalString(); - shardId = new ShardId(in); - totalShards = in.readInt(); - } - - public ShardId getShardId() { - return shardId; - } - - /** - * failure constructor - */ - public ShardsSyncedFlushResult(ShardId shardId, int totalShards, String failureReason) { - this.syncId = null; - this.failureReason = failureReason; - this.shardResponses = emptyMap(); - this.shardId = shardId; - this.totalShards = totalShards; - } - - /** - * success constructor - */ - public ShardsSyncedFlushResult(ShardId shardId, - String syncId, - int totalShards, - Map shardResponses) { - this.failureReason = null; - this.shardResponses = Map.copyOf(shardResponses); - this.syncId = syncId; - this.totalShards = totalShards; - this.shardId = shardId; - } - - /** - * @return true if the operation failed before reaching step three of synced flush. 
{@link #failureReason()} can be used for - * more details - */ - public boolean failed() { - return failureReason != null; - } - - /** - * @return the reason for the failure if synced flush failed before step three of synced flush - */ - public String failureReason() { - return failureReason; - } - - public String syncId() { - return syncId; - } - - /** - * @return total number of shards for which a sync attempt was made - */ - public int totalShards() { - return totalShards; - } - - /** - * @return total number of successful shards - */ - public int successfulShards() { - int i = 0; - for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { - if (result.success()) { - i++; - } - } - return i; - } - - /** - * @return an array of shard failures - */ - public Map failedShards() { - Map failures = new HashMap<>(); - for (Map.Entry result : shardResponses.entrySet()) { - if (result.getValue().success() == false) { - failures.put(result.getKey(), result.getValue()); - } - } - return failures; - } - - /** - * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. - * Empty if synced flush failed before step three. - */ - public Map shardResponses() { - return shardResponses; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - out.writeInt(shardResponses.size()); - for (Map.Entry entry : shardResponses.entrySet()) { - entry.getKey().writeTo(out); - entry.getValue().writeTo(out); - } - out.writeOptionalString(syncId); - shardId.writeTo(out); - out.writeInt(totalShards); - } -} diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java deleted file mode 100644 index c0e0d513b33d7..0000000000000 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ /dev/null @@ -1,761 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.indices.flush; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.StepListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; - -public class SyncedFlushService { - - private static final Logger logger = LogManager.getLogger(SyncedFlushService.class); - - private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; - private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync"; - private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight"; - - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - - @Inject - public SyncedFlushService(IndicesService indicesService, - ClusterService clusterService, - TransportService transportService, - IndexNameExpressionResolver indexNameExpressionResolver) { - this.indicesService = indicesService; - this.clusterService = clusterService; - this.transportService = transportService; - 
this.indexNameExpressionResolver = indexNameExpressionResolver; - transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, - new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, ShardSyncedFlushRequest::new, - new SyncedFlushTransportHandler()); - transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, ThreadPool.Names.SAME, InFlightOpsRequest::new, - new InFlightOpCountTransportHandler()); - } - - /** - * a utility method to perform a synced flush for all shards of multiple indices. - * see {@link #attemptSyncedFlush(ShardId, ActionListener)} - * for more details. - */ - public void attemptSyncedFlush(final String[] aliasesOrIndices, - IndicesOptions indicesOptions, - final ActionListener listener) { - final ClusterState state = clusterService.state(); - final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); - final Map> results = ConcurrentCollections.newConcurrentMap(); - int numberOfShards = 0; - for (Index index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); - numberOfShards += indexMetaData.getNumberOfShards(); - results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); - - } - if (numberOfShards == 0) { - listener.onResponse(new SyncedFlushResponse(results)); - return; - } - final CountDown countDown = new CountDown(numberOfShards); - - for (final Index concreteIndex : concreteIndices) { - final String index = concreteIndex.getName(); - final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex); - final int indexNumberOfShards = indexMetaData.getNumberOfShards(); - for (int shard = 0; shard < indexNumberOfShards; shard++) { - final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); - innerAttemptSyncedFlush(shardId, state, new ActionListener() { - @Override - public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { - results.get(index).add(syncedFlushResult); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - - @Override - public void onFailure(Exception e) { - logger.debug("{} unexpected error while executing synced flush", shardId); - final int totalShards = indexMetaData.getNumberOfReplicas() + 1; - results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage())); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - }); - } - } - } - - /* - * Tries to flush all copies of a shard and write a sync id to it. - * After a synced flush two shard copies may only contain the same sync id if they contain the same documents. - * To ensure this, synced flush works in three steps: - * 1. Flush all shard copies and gather the commit ids for each copy after the flush - * 2. Ensure that there are no ongoing indexing operations on the primary - * 3. Perform an additional flush on each shard copy that writes the sync id - * - * Step 3 is only executed on a shard if - * a) the shard has no uncommitted changes since the last flush - * b) the last flush was the one executed in 1 (use the collected commit id to verify this) - * - * This alone is not enough to ensure that all copies contain the same documents. 
- * Without step 2 a sync id would be written for inconsistent copies in the following scenario: - * - * Write operation has completed on a primary and is being sent to replicas. The write request does not reach the - * replicas until sync flush is finished. - * Step 1 is executed. After the flush the commit points on primary contains a write operation that the replica does not have. - * Step 3 will be executed on primary and replica as well because there are no uncommitted changes on primary (the first flush - * committed them) and there are no uncommitted changes on the replica (the write operation has not reached the replica yet). - * - * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary. - * Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a snc id will only - * be written on a primary if no write operation was executed between step 1 and step 3 and sync id will only be written on - * the replica if it contains the same changes that the primary contains. - * - * Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies. - **/ - public void attemptSyncedFlush(final ShardId shardId, final ActionListener actionListener) { - innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener); - } - - private void innerAttemptSyncedFlush(final ShardId shardId, - final ClusterState state, - final ActionListener actionListener) { - try { - final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - final int totalShards = shardRoutingTable.getSize(); - - if (activeShards.size() == 0) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards")); - return; - } - - // 1. send pre-sync flushes to all replicas - final StepListener> presyncStep = new StepListener<>(); - sendPreSyncRequests(activeShards, state, shardId, presyncStep); - - // 2. fetch in flight operations - final StepListener inflightOpsStep = new StepListener<>(); - presyncStep.whenComplete(presyncResponses -> { - if (presyncResponses.isEmpty()) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync")); - } else { - getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep); - } - }, actionListener::onFailure); - - // 3. 
now send the sync request to all the shards - inflightOpsStep.whenComplete(inFlightOpsResponse -> { - final Map presyncResponses = presyncStep.result(); - final int inflight = inFlightOpsResponse.opCount(); - assert inflight >= 0; - if (inflight != 0) { - actionListener.onResponse( - new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary")); - } else { - final String sharedSyncId = sharedExistingSyncId(presyncResponses); - if (sharedSyncId != null) { - assert presyncResponses.values().stream().allMatch(r -> r.existingSyncId.equals(sharedSyncId)) : - "Not all shards have the same existing sync id [" + sharedSyncId + "], responses [" + presyncResponses + "]"; - reportSuccessWithExistingSyncId(shardId, sharedSyncId, activeShards, totalShards, presyncResponses, actionListener); - }else { - String syncId = UUIDs.randomBase64UUID(); - sendSyncRequests(syncId, activeShards, state, presyncResponses, shardId, totalShards, actionListener); - } - } - }, actionListener::onFailure); - } catch (Exception e) { - actionListener.onFailure(e); - } - } - - private String sharedExistingSyncId(Map preSyncedFlushResponses) { - String existingSyncId = null; - for (PreSyncedFlushResponse resp : preSyncedFlushResponses.values()) { - if (Strings.isNullOrEmpty(resp.existingSyncId)) { - return null; - } - if (existingSyncId == null) { - existingSyncId = resp.existingSyncId; - } - if (existingSyncId.equals(resp.existingSyncId) == false) { - return null; - } - } - return existingSyncId; - } - - private void reportSuccessWithExistingSyncId(ShardId shardId, - String existingSyncId, - List shards, - int totalShards, - Map preSyncResponses, - ActionListener listener) { - final Map results = new HashMap<>(); - for (final ShardRouting shard : shards) { - if (preSyncResponses.containsKey(shard.currentNodeId())) { - results.put(shard, new ShardSyncedFlushResponse((String) null)); - } - } - listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results)); - } - - final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) { - final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex()); - if (indexMetaData == null) { - throw new IndexNotFoundException(shardId.getIndexName()); - } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - throw new IndexClosedException(shardId.getIndex()); - } - final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetaData.getIndex()).shard(shardId.id()); - if (shardRoutingTable == null) { - throw new ShardNotFoundException(shardId); - } - return shardRoutingTable; - } - - /** - * returns the number of in flight operations on primary. -1 upon error. 
- */ - protected void getInflightOpsCount(final ShardId shardId, - ClusterState state, - IndexShardRoutingTable shardRoutingTable, - final ActionListener listener) { - try { - final ShardRouting primaryShard = shardRoutingTable.primaryShard(); - final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId()); - if (primaryNode == null) { - logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard); - listener.onResponse(new InFlightOpsResponse(-1)); - return; - } - logger.trace("{} retrieving in flight operation count", shardId); - transportService.sendRequest(primaryNode, IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpsRequest(shardId), - new TransportResponseHandler() { - @Override - public InFlightOpsResponse read(StreamInput in) throws IOException { - return new InFlightOpsResponse(in); - } - - @Override - public void handleResponse(InFlightOpsResponse response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("{} unexpected error while retrieving in flight op count", shardId); - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } catch (Exception e) { - listener.onFailure(e); - } - } - - private int numDocsOnPrimary(List shards, Map preSyncResponses) { - for (ShardRouting shard : shards) { - if (shard.primary()) { - final PreSyncedFlushResponse resp = preSyncResponses.get(shard.currentNodeId()); - if (resp != null) { - return resp.numDocs; - } - } - } - return PreSyncedFlushResponse.UNKNOWN_NUM_DOCS; - } - - void sendSyncRequests(final String syncId, - final List shards, - ClusterState state, - Map preSyncResponses, - final ShardId shardId, - final int totalShards, - final ActionListener listener) { - final CountDown countDown = new CountDown(shards.size()); - final Map results = ConcurrentCollections.newConcurrentMap(); - final int numDocsOnPrimary = numDocsOnPrimary(shards, preSyncResponses); - for (final ShardRouting shard : shards) { - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new ShardSyncedFlushResponse("unknown node")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - final PreSyncedFlushResponse preSyncedResponse = preSyncResponses.get(shard.currentNodeId()); - if (preSyncedResponse == null) { - logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. 
shard routing {}", - shardId, syncId, shard); - results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - if (preSyncedResponse.numDocs != numDocsOnPrimary && - preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS && - numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) { - logger.debug("{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]", - shardId, syncId, shard, preSyncedResponse.numDocs, numDocsOnPrimary); - results.put(shard, new ShardSyncedFlushResponse("ongoing indexing operations: " + - "num docs on replica [" + preSyncedResponse.numDocs + "]; num docs on primary [" + numDocsOnPrimary + "]")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId); - ShardSyncedFlushRequest syncedFlushRequest = new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId); - transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, syncedFlushRequest, - new TransportResponseHandler() { - @Override - public ShardSyncedFlushResponse read(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - - @Override - public void handleResponse(ShardSyncedFlushResponse response) { - ShardSyncedFlushResponse existing = results.put(shard, response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public void handleException(TransportException exp) { - logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", - shardId, shard), exp); - results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - - } - - private void countDownAndSendResponseIfDone(String syncId, - List shards, - ShardId shardId, - int totalShards, - ActionListener listener, - CountDown countDown, - Map results) { - if (countDown.countDown()) { - assert results.size() == shards.size(); - listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); - } - } - - /** - * send presync requests to all started copies of the given shard - */ - void sendPreSyncRequests(final List shards, - final ClusterState state, - final ShardId shardId, - final ActionListener> listener) { - final CountDown countDown = new CountDown(shards.size()); - final ConcurrentMap presyncResponses = ConcurrentCollections.newConcurrentMap(); - for (final ShardRouting shard : shards) { - logger.trace("{} sending pre-synced flush request to {}", shardId, shard); - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} shard routing {} refers to an unknown node. 
skipping.", shardId, shard); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - continue; - } - transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), - new TransportResponseHandler() { - @Override - public PreSyncedFlushResponse read(StreamInput in) throws IOException { - return new PreSyncedFlushResponse(in); - } - - @Override - public void handleResponse(PreSyncedFlushResponse response) { - PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public void handleException(TransportException exp) { - logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", - shardId, shard), exp); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - } - - private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); - logger.trace("{} performing pre sync flush", request.shardId()); - indexShard.flush(flushRequest); - final CommitStats commitStats = indexShard.commitStats(); - final Engine.CommitId commitId = commitStats.getRawCommitId(); - logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs()); - return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId()); - } - - private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", - request.shardId(), request.syncId(), request.expectedCommitId()); - Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId()); - logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); - switch (result) { - case SUCCESS: - return new ShardSyncedFlushResponse((String) null); - case COMMIT_MISMATCH: - return new ShardSyncedFlushResponse("commit has changed"); - case PENDING_OPERATIONS: - return new ShardSyncedFlushResponse("pending operations"); - default: - throw new ElasticsearchException("unknown synced flush result [" + result + "]"); - } - } - - private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - if (indexShard.routingEntry().primary() == false) { - throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); - } - int opCount = indexShard.getActiveOperationsCount(); - return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 
0 : opCount); - } - - public static final class PreShardSyncedFlushRequest extends TransportRequest { - private ShardId shardId; - - public PreShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - this.shardId = new ShardId(in); - } - - public PreShardSyncedFlushRequest(ShardId shardId) { - this.shardId = shardId; - } - - @Override - public String toString() { - return "PreShardSyncedFlushRequest{" + - "shardId=" + shardId + - '}'; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - } - - /** - * Response for first step of synced flush (flush) for one shard copy - */ - static final class PreSyncedFlushResponse extends TransportResponse { - static final int UNKNOWN_NUM_DOCS = -1; - - Engine.CommitId commitId; - int numDocs; - @Nullable String existingSyncId = null; - - PreSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - commitId = new Engine.CommitId(in); - numDocs = in.readInt(); - existingSyncId = in.readOptionalString(); - } - - PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) { - this.commitId = commitId; - this.numDocs = numDocs; - this.existingSyncId = existingSyncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - commitId.writeTo(out); - out.writeInt(numDocs); - out.writeOptionalString(existingSyncId); - } - } - - public static final class ShardSyncedFlushRequest extends TransportRequest { - - private String syncId; - private Engine.CommitId expectedCommitId; - private ShardId shardId; - - public ShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - expectedCommitId = new Engine.CommitId(in); - syncId = in.readString(); - } - - public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { - this.expectedCommitId = expectedCommitId; - this.shardId = shardId; - this.syncId = syncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - expectedCommitId.writeTo(out); - out.writeString(syncId); - } - - public ShardId shardId() { - return shardId; - } - - public String syncId() { - return syncId; - } - - public Engine.CommitId expectedCommitId() { - return expectedCommitId; - } - - @Override - public String toString() { - return "ShardSyncedFlushRequest{" + - "shardId=" + shardId + - ",syncId='" + syncId + '\'' + - '}'; - } - } - - /** - * Response for third step of synced flush (writing the sync id) for one shard copy - */ - public static final class ShardSyncedFlushResponse extends TransportResponse { - - /** - * a non null value indicates a failure to sync flush. 
null means success - */ - String failureReason; - - public ShardSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - failureReason = in.readOptionalString(); - } - - public ShardSyncedFlushResponse(String failureReason) { - this.failureReason = failureReason; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - } - - public boolean success() { - return failureReason == null; - } - - public String failureReason() { - return failureReason; - } - - @Override - public String toString() { - return "ShardSyncedFlushResponse{" + - "success=" + success() + - ", failureReason='" + failureReason + '\'' + - '}'; - } - - public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - } - - - public static final class InFlightOpsRequest extends TransportRequest { - - private ShardId shardId; - - public InFlightOpsRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - } - - public InFlightOpsRequest(ShardId shardId) { - this.shardId = shardId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String toString() { - return "InFlightOpsRequest{" + - "shardId=" + shardId + - '}'; - } - } - - /** - * Response for second step of synced flush (check operations in flight) - */ - static final class InFlightOpsResponse extends TransportResponse { - - int opCount; - - InFlightOpsResponse(StreamInput in) throws IOException { - super(in); - opCount = in.readVInt(); - } - - InFlightOpsResponse(int opCount) { - assert opCount >= 0 : opCount; - this.opCount = opCount; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(opCount); - } - - public int opCount() { - return opCount; - } - - @Override - public String toString() { - return "InFlightOpsResponse{" + - "opCount=" + opCount + - '}'; - } - } - - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performPreSyncedFlush(request)); - } - } - - private final class SyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performSyncedFlush(request)); - } - } - - private final class InFlightOpCountTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performInFlightOps(request)); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 1f0d2b4757128..72adbfae0e6c0 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -646,8 +646,14 @@ private static Object deepCopy(Object value) { */ public void executePipeline(Pipeline pipeline, BiConsumer handler) { if (executedPipelines.add(pipeline.getId())) { + Object previousPipeline = ingestMetadata.put("pipeline", 
pipeline.getId()); pipeline.execute(this, (result, e) -> { executedPipelines.remove(pipeline.getId()); + if (previousPipeline != null) { + ingestMetadata.put("pipeline", previousPipeline); + } else { + ingestMetadata.remove("pipeline"); + } handler.accept(result, e); }); } else { diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java b/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java index be02fe24752c1..2ee99d6e5d48f 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineProcessor.java @@ -31,7 +31,7 @@ public class PipelineProcessor extends AbstractProcessor { private final TemplateScript.Factory pipelineTemplate; private final IngestService ingestService; - private PipelineProcessor(String tag, TemplateScript.Factory pipelineTemplate, IngestService ingestService) { + PipelineProcessor(String tag, TemplateScript.Factory pipelineTemplate, IngestService ingestService) { super(tag); this.pipelineTemplate = pipelineTemplate; this.ingestService = ingestService; diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 347e8a8c79389..5c4890ad90ca1 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -23,9 +23,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Assertions; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchExecutionStatsCollector; @@ -89,10 +91,12 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexSettings; @@ -405,6 +409,9 @@ protected Node( ClusterModule.getNamedXWriteables().stream()) .flatMap(Function.identity()).collect(toList())); final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry); + final PersistedClusterStateService lucenePersistedStateFactory + = new PersistedClusterStateService(nodeEnvironment, xContentRegistry, bigArrays, clusterService.getClusterSettings(), + threadPool::relativeTimeInMillis); // collect engine factory providers from server and from plugins final Collection enginePlugins = pluginsService.filterPlugins(EnginePlugin.class); @@ -542,6 +549,7 @@ protected Node( b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader); b.bind(MetaStateService.class).toInstance(metaStateService); + b.bind(PersistedClusterStateService.class).toInstance(lucenePersistedStateFactory); b.bind(IndicesService.class).toInstance(indicesService); b.bind(AliasValidator.class).toInstance(aliasValidator); 
b.bind(MetaDataCreateIndexService.class).toInstance(metaDataCreateIndexService); @@ -688,7 +696,20 @@ public Node start() throws NodeValidationException { // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start(settings(), transportService, clusterService, injector.getInstance(MetaStateService.class), - injector.getInstance(MetaDataIndexUpgradeService.class), injector.getInstance(MetaDataUpgrader.class)); + injector.getInstance(MetaDataIndexUpgradeService.class), injector.getInstance(MetaDataUpgrader.class), + injector.getInstance(PersistedClusterStateService.class)); + if (Assertions.ENABLED) { + try { + assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty(); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, + nodeEnvironment.nodeDataPaths()); + assert nodeMetaData != null; + assert nodeMetaData.nodeVersion().equals(Version.CURRENT); + assert nodeMetaData.nodeId().equals(localNodeFactory.getNode().getId()); + } catch (IOException e) { + assert false : e; + } + } // we load the global state here (the persistent part of the cluster state stored on disk) to // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. final MetaData onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metaData(); @@ -860,8 +881,11 @@ public synchronized void close() throws IOException { // Don't call shutdownNow here, it might break ongoing operations on Lucene indices. // See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in // awaitClose if the node doesn't finish closing within the specified time. - toClose.add(() -> stopWatch.stop().start("node_environment")); + toClose.add(() -> stopWatch.stop().start("gateway_meta_state")); + toClose.add(injector.getInstance(GatewayMetaState.class)); + + toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); toClose.add(stopWatch::stop); diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index b48f1cbcc3f1c..4e654262912c5 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.search.function.ScoreFunction; +import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; @@ -248,7 +249,7 @@ public QuerySpec(String name, Writeable.Reader reader, QueryParser parser) /** * Specification for an {@link Aggregation}. 
*/ - class AggregationSpec extends SearchExtensionSpec { + class AggregationSpec extends SearchExtensionSpec> { private final Map> resultReaders = new TreeMap<>(); /** @@ -261,7 +262,8 @@ class AggregationSpec extends SearchExtensionSpec reader, Aggregator.Parser parser) { + public AggregationSpec(ParseField name, Writeable.Reader reader, + ContextParser parser) { super(name, reader, parser); } @@ -274,10 +276,41 @@ public AggregationSpec(ParseField name, Writeable.Reader reader, Aggregator.Parser parser) { + public AggregationSpec(String name, Writeable.Reader reader, ContextParser parser) { super(name, reader, parser); } + /** + * Specification for an {@link Aggregation}. + * + * @param name holds the names by which this aggregation might be parsed. The {@link ParseField#getPreferredName()} is special as it + * is the name by under which the reader is registered. So it is the name that the {@link AggregationBuilder} should return + * from {@link NamedWriteable#getWriteableName()}. + * @param reader the reader registered for this aggregation's builder. Typically a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser the reads the aggregation builder from xcontent + * @deprecated Use the ctor that takes a {@link ContextParser} instead + */ + @Deprecated + public AggregationSpec(ParseField name, Writeable.Reader reader, Aggregator.Parser parser) { + super(name, reader, (p, aggName) -> parser.parse(aggName, p)); + } + + /** + * Specification for an {@link Aggregation}. + * + * @param name the name by which this aggregation might be parsed or deserialized. Make sure that the {@link AggregationBuilder} + * returns this from {@link NamedWriteable#getWriteableName()}. + * @param reader the reader registered for this aggregation's builder. Typically a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser the reads the aggregation builder from xcontent + * @deprecated Use the ctor that takes a {@link ContextParser} instead + */ + @Deprecated + public AggregationSpec(String name, Writeable.Reader reader, Aggregator.Parser parser) { + super(name, reader, (p, aggName) -> parser.parse(aggName, p)); + } + /** * Add a reader for the shard level results of the aggregation with {@linkplain #getName}'s {@link ParseField#getPreferredName()} as * the {@link NamedWriteable#getWriteableName()}. diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index f41a28686e38b..618eb246a14b4 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -69,7 +69,7 @@ public final class RepositoryData { * An instance initialized for an empty repository. */ public static final RepositoryData EMPTY = new RepositoryData(EMPTY_REPO_GEN, - Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY); + Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY); /** * The generational id of the index file from which the repository data was read. @@ -92,13 +92,16 @@ public final class RepositoryData { */ private final Map> indexSnapshots; + private final Map snapshotVersions; + /** * Shard generations. 
*/ private final ShardGenerations shardGenerations; public RepositoryData(long genId, Map snapshotIds, Map snapshotStates, - Map> indexSnapshots, ShardGenerations shardGenerations) { + Map snapshotVersions, Map> indexSnapshots, + ShardGenerations shardGenerations) { this.genId = genId; this.snapshotIds = Collections.unmodifiableMap(snapshotIds); this.snapshotStates = Collections.unmodifiableMap(snapshotStates); @@ -106,12 +109,27 @@ public RepositoryData(long genId, Map snapshotIds, Map versions) { + if (versions.isEmpty()) { + return this; + } + final Map newVersions = new HashMap<>(snapshotVersions); + versions.forEach((id, version) -> newVersions.put(id.getUUID(), version)); + return new RepositoryData(genId, snapshotIds, snapshotStates, newVersions, indexSnapshots, shardGenerations); } public ShardGenerations shardGenerations() { @@ -141,6 +159,14 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { return snapshotStates.get(snapshotId.getUUID()); } + /** + * Returns the {@link Version} for the given snapshot or {@code null} if unknown. + */ + @Nullable + public Version getVersion(SnapshotId snapshotId) { + return snapshotVersions.get(snapshotId.getUUID()); + } + /** * Returns an unmodifiable map of the index names to {@link IndexId} in the repository. */ @@ -173,6 +199,7 @@ public List indicesToUpdateAfterRemovingSnapshot(SnapshotId snapshotId) */ public RepositoryData addSnapshot(final SnapshotId snapshotId, final SnapshotState snapshotState, + final Version version, final ShardGenerations shardGenerations) { if (snapshotIds.containsKey(snapshotId.getUUID())) { // if the snapshot id already exists in the repository data, it means an old master @@ -184,11 +211,13 @@ public RepositoryData addSnapshot(final SnapshotId snapshotId, snapshots.put(snapshotId.getUUID(), snapshotId); Map newSnapshotStates = new HashMap<>(snapshotStates); newSnapshotStates.put(snapshotId.getUUID(), snapshotState); + Map newSnapshotVersions = new HashMap<>(snapshotVersions); + newSnapshotVersions.put(snapshotId.getUUID(), version); Map> allIndexSnapshots = new HashMap<>(indexSnapshots); for (final IndexId indexId : shardGenerations.indices()) { allIndexSnapshots.computeIfAbsent(indexId, k -> new LinkedHashSet<>()).add(snapshotId); } - return new RepositoryData(genId, snapshots, newSnapshotStates, allIndexSnapshots, + return new RepositoryData(genId, snapshots, newSnapshotStates, newSnapshotVersions, allIndexSnapshots, ShardGenerations.builder().putAll(this.shardGenerations).putAll(shardGenerations).build()); } @@ -202,7 +231,7 @@ public RepositoryData withGenId(long newGeneration) { if (newGeneration == genId) { return this; } - return new RepositoryData(newGeneration, this.snapshotIds, this.snapshotStates, this.indexSnapshots, this.shardGenerations); + return new RepositoryData(newGeneration, snapshotIds, snapshotStates, snapshotVersions, indexSnapshots, shardGenerations); } /** @@ -222,6 +251,8 @@ public RepositoryData removeSnapshot(final SnapshotId snapshotId, final ShardGen } Map newSnapshotStates = new HashMap<>(snapshotStates); newSnapshotStates.remove(snapshotId.getUUID()); + final Map newSnapshotVersions = new HashMap<>(snapshotVersions); + newSnapshotVersions.remove(snapshotId.getUUID()); Map> indexSnapshots = new HashMap<>(); for (final IndexId indexId : indices.values()) { Set set; @@ -241,7 +272,7 @@ public RepositoryData removeSnapshot(final SnapshotId snapshotId, final ShardGen indexSnapshots.put(indexId, set); } - return new RepositoryData(genId, newSnapshotIds, 
newSnapshotStates, indexSnapshots, + return new RepositoryData(genId, newSnapshotIds, newSnapshotStates, newSnapshotVersions, indexSnapshots, ShardGenerations.builder().putAll(shardGenerations).putAll(updatedShardGenerations) .retainIndicesAndPruneDeletes(indexSnapshots.keySet()).build() ); @@ -269,6 +300,7 @@ public boolean equals(Object obj) { RepositoryData that = (RepositoryData) obj; return snapshotIds.equals(that.snapshotIds) && snapshotStates.equals(that.snapshotStates) + && snapshotVersions.equals(that.snapshotVersions) && indices.equals(that.indices) && indexSnapshots.equals(that.indexSnapshots) && shardGenerations.equals(that.shardGenerations); @@ -276,7 +308,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(snapshotIds, snapshotStates, indices, indexSnapshots, shardGenerations); + return Objects.hash(snapshotIds, snapshotStates, snapshotVersions, indices, indexSnapshots, shardGenerations); } /** @@ -323,6 +355,7 @@ public List resolveNewIndices(final List indicesToResolve) { private static final String NAME = "name"; private static final String UUID = "uuid"; private static final String STATE = "state"; + private static final String VERSION = "version"; private static final String MIN_VERSION = "min_version"; /** @@ -339,6 +372,9 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final if (snapshotStates.containsKey(snapshot.getUUID())) { builder.field(STATE, snapshotStates.get(snapshot.getUUID()).value()); } + if (snapshotVersions.containsKey(snapshot.getUUID())) { + builder.field(VERSION, snapshotVersions.get(snapshot.getUUID()).toString()); + } builder.endObject(); } builder.endArray(); @@ -365,10 +401,8 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } builder.endObject(); if (shouldWriteShardGens) { - // TODO: write this field once 7.6 is able to read it and add tests to :qa:snapshot-repository-downgrade that make sure older - // ES versions can't corrupt the repository by writing to it and all the snapshots in it are v7.6 or newer // Add min version field to make it impossible for older ES versions to deserialize this object - // builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString()); + builder.field(MIN_VERSION, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.toString()); } builder.endObject(); return builder; @@ -380,6 +414,7 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException { final Map snapshots = new HashMap<>(); final Map snapshotStates = new HashMap<>(); + final Map snapshotVersions = new HashMap<>(); final Map> indexSnapshots = new HashMap<>(); final ShardGenerations.Builder shardGenerations = ShardGenerations.builder(); @@ -392,6 +427,7 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, String name = null; String uuid = null; SnapshotState state = null; + Version version = null; while (parser.nextToken() != XContentParser.Token.END_OBJECT) { String currentFieldName = parser.currentName(); parser.nextToken(); @@ -401,12 +437,17 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, uuid = parser.text(); } else if (STATE.equals(currentFieldName)) { state = SnapshotState.fromValue(parser.numberValue().byteValue()); + } else if (VERSION.equals(currentFieldName)) { + version = Version.fromString(parser.text()); } } final SnapshotId 
snapshotId = new SnapshotId(name, uuid); if (state != null) { snapshotStates.put(uuid, state); } + if (version != null) { + snapshotVersions.put(uuid, version); + } snapshots.put(snapshotId.getUUID(), snapshotId); } } else { @@ -490,7 +531,7 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, } else { throw new ElasticsearchParseException("start object expected"); } - return new RepositoryData(genId, snapshots, snapshotStates, indexSnapshots, shardGenerations.build()); + return new RepositoryData(genId, snapshots, snapshotStates, snapshotVersions, indexSnapshots, shardGenerations.build()); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b9016394ca514..87a9da671e417 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -32,6 +32,7 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.StepListener; @@ -46,6 +47,7 @@ import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -121,6 +123,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; @@ -873,7 +876,7 @@ public void finalizeSnapshot(final SnapshotId snapshotId, final SnapshotInfo snapshotInfo = snapshotInfos.iterator().next(); getRepositoryData(ActionListener.wrap(existingRepositoryData -> { final RepositoryData updatedRepositoryData = - existingRepositoryData.addSnapshot(snapshotId, snapshotInfo.state(), shardGenerations); + existingRepositoryData.addSnapshot(snapshotId, snapshotInfo.state(), Version.CURRENT, shardGenerations); writeIndexGen(updatedRepositoryData, repositoryStateId, writeShardGens, ActionListener.wrap(v -> { if (writeShardGens) { cleanupOldShardGens(existingRepositoryData, updatedRepositoryData); @@ -1049,6 +1052,9 @@ public void getRepositoryData(ActionListener listener) { return; } // Retry loading RepositoryData in a loop in case we run into concurrent modifications of the repository. + // Keep track of the most recent generation we failed to load so we can break out of the loop if we fail to load the same + // generation repeatedly. 
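The retry guard introduced in this hunk is easy to miss in the diff: getRepositoryData now remembers the last generation it failed to load and only retries when the generation to load has actually moved on to a different value. A minimal, JDK-only sketch of that pattern (latestKnownGen and the loader are simplified stand-ins for latestKnownRepoGen and loading RepositoryData at a generation):

import java.util.concurrent.atomic.AtomicLong;

class RetryGuardSketch {
    private static final long UNKNOWN_GEN = -1L;
    private final AtomicLong latestKnownGen = new AtomicLong(UNKNOWN_GEN);

    // Hypothetical loader standing in for reading the repository data at a given generation.
    Object load(long generation) { throw new IllegalStateException("concurrent repository modification"); }

    Object loadWithRetry() {
        long lastFailedGeneration = UNKNOWN_GEN;
        while (true) {
            final long genToLoad = latestKnownGen.get();
            try {
                return load(genToLoad);
            } catch (IllegalStateException e) {
                // Retry only if another operation moved the generation forward and we have
                // not already failed on this exact generation; otherwise surface the error.
                if (genToLoad != latestKnownGen.get() && genToLoad != lastFailedGeneration) {
                    lastFailedGeneration = genToLoad;
                    continue;
                }
                throw e;
            }
        }
    }
}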
+ long lastFailedGeneration = RepositoryData.UNKNOWN_REPO_GEN; while (true) { final long genToLoad; if (bestEffortConsistency) { @@ -1058,7 +1064,9 @@ public void getRepositoryData(ActionListener listener) { try { generation = latestIndexBlobId(); } catch (IOException ioe) { - throw new RepositoryException(metadata.name(), "Could not determine repository generation from root blobs", ioe); + listener.onFailure( + new RepositoryException(metadata.name(), "Could not determine repository generation from root blobs", ioe)); + return; } genToLoad = latestKnownRepoGen.updateAndGet(known -> Math.max(known, generation)); if (genToLoad > generation) { @@ -1073,7 +1081,9 @@ public void getRepositoryData(ActionListener listener) { listener.onResponse(getRepositoryData(genToLoad)); return; } catch (RepositoryException e) { - if (genToLoad != latestKnownRepoGen.get()) { + // If the generation to load changed concurrently and we didn't just try loading the same generation before we retry + if (genToLoad != latestKnownRepoGen.get() && genToLoad != lastFailedGeneration) { + lastFailedGeneration = genToLoad; logger.warn("Failed to load repository data generation [" + genToLoad + "] because a concurrent operation moved the current generation to [" + latestKnownRepoGen.get() + "]", e); continue; @@ -1083,10 +1093,10 @@ public void getRepositoryData(ActionListener listener) { // of N so we mark this repository as corrupted. markRepoCorrupted(genToLoad, e, ActionListener.wrap(v -> listener.onFailure(corruptedStateException(e)), listener::onFailure)); - return; } else { - throw e; + listener.onFailure(e); } + return; } } } @@ -1252,8 +1262,42 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); + final StepListener filterRepositoryDataStep = new StepListener<>(); + // Step 2: Write new index-N blob to repository and update index.latest setPendingStep.whenComplete(newGen -> threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { + // BwC logic: Load snapshot version information if any snapshot is missing a version in RepositoryData so that the new + // RepositoryData contains a version for every snapshot + final List snapshotIdsWithoutVersion = repositoryData.getSnapshotIds().stream().filter( + snapshotId -> repositoryData.getVersion(snapshotId) == null).collect(Collectors.toList()); + if (snapshotIdsWithoutVersion.isEmpty() == false) { + final Map updatedVersionMap = new ConcurrentHashMap<>(); + final GroupedActionListener loadAllVersionsListener = new GroupedActionListener<>( + ActionListener.runAfter( + new ActionListener<>() { + @Override + public void onResponse(Collection voids) { + logger.info("Successfully loaded all snapshot's version information for {} from snapshot metadata", + AllocationService.firstListElementsToCommaDelimitedString( + snapshotIdsWithoutVersion, SnapshotId::toString, logger.isDebugEnabled())); + } + + @Override + public void onFailure(Exception e) { + logger.warn("Failure when trying to load missing version information from snapshot metadata", e); + } + }, () -> filterRepositoryDataStep.onResponse(repositoryData.withVersions(updatedVersionMap))), + snapshotIdsWithoutVersion.size()); + for (SnapshotId snapshotId : snapshotIdsWithoutVersion) { + threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(loadAllVersionsListener, () -> + updatedVersionMap.put(snapshotId, getSnapshotInfo(snapshotId).version()))); + } + } else { + filterRepositoryDataStep.onResponse(repositoryData); + } + })), 
listener::onFailure); + filterRepositoryDataStep.whenComplete(filteredRepositoryData -> { + final long newGen = setPendingStep.result(); if (latestKnownRepoGen.get() >= newGen) { throw new IllegalArgumentException( "Tried writing generation [" + newGen + "] but repository is at least at generation [" + latestKnownRepoGen.get() @@ -1263,7 +1307,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen); logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob); writeAtomic(indexBlob, - BytesReference.bytes(repositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), writeShardGens)), true); + BytesReference.bytes(filteredRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), writeShardGens)), true); // write the current generation to the index-latest file final BytesReference genBytes; try (BytesStreamOutput bStream = new BytesStreamOutput()) { @@ -1297,13 +1341,13 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - l.onFailure( + listener.onFailure( new RepositoryException(metadata.name(), "Failed to execute cluster state update [" + source + "]", e)); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(l, () -> { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(listener, () -> { // Delete all now outdated index files up to 1000 blobs back from the new generation. // If there are more than 1000 dangling index-N cleanup functionality on repo delete will take care of them. 
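Before writing the new index-N blob, the change backfills a version for every snapshot that does not yet have one by loading each snapshot's metadata on the SNAPSHOT pool and collecting the results with a GroupedActionListener. As a rough, JDK-only analogue of that fan-out/fan-in step (the production code stays asynchronous and hands the merged map to filterRepositoryDataStep via repositoryData.withVersions):

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

class VersionBackfillSketch {
    // Hypothetical lookup standing in for getSnapshotInfo(snapshotId).version().
    String loadVersionFromSnapshotMetadata(String snapshotUuid) { return "7.6.0"; }

    Map<String, String> backfillMissingVersions(List<String> uuidsWithoutVersion, ExecutorService snapshotPool) {
        final Map<String, String> updatedVersions = new ConcurrentHashMap<>();
        CompletableFuture<?>[] loads = uuidsWithoutVersion.stream()
            .map(uuid -> CompletableFuture.runAsync(
                () -> updatedVersions.put(uuid, loadVersionFromSnapshotMetadata(uuid)), snapshotPool))
            .toArray(CompletableFuture[]::new);
        // Blocking here is only for the sketch; the real code instead completes the next step
        // once every per-snapshot load has reported in, successful or not.
        CompletableFuture.allOf(loads).join();
        return updatedVersions;
    }
}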
// Deleting one older than the current expectedGen is done for BwC reasons as older versions used to keep @@ -1320,7 +1364,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS })); } }); - })), listener::onFailure); + }, listener::onFailure); } private RepositoryMetaData getRepoMetaData(ClusterState state) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java index 53cbb5c6d10dd..d775189af48e3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -19,18 +19,23 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -39,6 +44,9 @@ public class RestSyncedFlushAction extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(RestSyncedFlushAction.class); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); + public RestSyncedFlushAction(RestController controller) { controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this); @@ -54,17 +62,35 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); - syncedFlushRequest.indicesOptions(indicesOptions); - return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { - builder.startObject(); - results.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(results.restStatus(), builder); - } - }); + DEPRECATION_LOGGER.deprecatedAndMaybeLog("synced_flush", + "Synced flush was removed and a normal flush was performed instead. 
This transition will be removed in a future version."); + final FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); + return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel)); + } + + static final class SimulateSyncedFlushResponseListener extends RestToXContentListener { + + SimulateSyncedFlushResponseListener(RestChannel channel) { + super(channel); + } + + @Override + public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception { + builder.startObject(); + buildSyncedFlushResponse(builder, flushResponse); + builder.endObject(); + final RestStatus restStatus = flushResponse.getFailedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT; + return new BytesRestResponse(restStatus, builder); + } + + private void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException { + builder.startObject("_shards"); + builder.field("total", flushResponse.getTotalShards()); + builder.field("successful", flushResponse.getSuccessfulShards()); + builder.field("failed", flushResponse.getFailedShards()); + // can't serialize the detail of each index as we don't have the shard count per index. + builder.endObject(); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index bf0f876704148..b09c8983c1394 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -139,6 +139,11 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) { public BytesRef parseBytesRef(String value) { return new BytesRef(value); } + + @Override + public String toString() { + return "raw"; + } }; DocValueFormat BINARY = new DocValueFormat() { @@ -335,6 +340,11 @@ public String format(BytesRef value) { public BytesRef parseBytesRef(String value) { return new BytesRef(InetAddressPoint.encode(InetAddresses.forString(value))); } + + @Override + public String toString() { + return "ip"; + } }; final class Decimal implements DocValueFormat { diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 089132a6ef744..a68413dfc4d03 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -422,16 +422,16 @@ private void registerAggregations(List plugins) { registerAggregation(new AggregationSpec(GeoCentroidAggregationBuilder.NAME, GeoCentroidAggregationBuilder::new, GeoCentroidAggregationBuilder::parse).addResultReader(InternalGeoCentroid::new)); registerAggregation(new AggregationSpec(ScriptedMetricAggregationBuilder.NAME, ScriptedMetricAggregationBuilder::new, - (name, p) -> ScriptedMetricAggregationBuilder.PARSER.parse(p, name)).addResultReader(InternalScriptedMetric::new)); + ScriptedMetricAggregationBuilder.PARSER).addResultReader(InternalScriptedMetric::new)); registerAggregation((new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, - (name, p) -> CompositeAggregationBuilder.PARSER.parse(p, name)).addResultReader(InternalComposite::new))); + CompositeAggregationBuilder.PARSER).addResultReader(InternalComposite::new))); 
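The SearchPlugin/SearchModule change above boils down to AggregationSpec accepting an xcontent ContextParser directly, with the old Aggregator.Parser constructors kept only as deprecated adapters. The adaptation is just an argument-order flip; a self-contained sketch of that flip with simplified stand-in interfaces (the real types are org.elasticsearch.search.aggregations.Aggregator.Parser and org.elasticsearch.common.xcontent.ContextParser, and the first argument is an XContentParser rather than Object):

import java.io.IOException;

class ParserAdapterSketch {
    // Simplified stand-ins for the two parser shapes involved.
    interface LegacyAggParser<T> { T parse(String aggName, Object xContentParser) throws IOException; }
    interface ContextParser<C, T> { T parse(Object xContentParser, C context) throws IOException; }

    // The deprecated AggregationSpec constructors bridge the legacy shape like this:
    static <T> ContextParser<String, T> adapt(LegacyAggParser<T> legacy) {
        return (p, aggName) -> legacy.parse(aggName, p);
    }
}

With the new constructors in place, the registerAggregations hunk above can pass ScriptedMetricAggregationBuilder.PARSER and CompositeAggregationBuilder.PARSER straight through instead of wrapping them in (name, p) -> PARSER.parse(p, name) lambdas.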
registerFromPlugin(plugins, SearchPlugin::getAggregations, this::registerAggregation); } private void registerAggregation(AggregationSpec spec) { namedXContents.add(new NamedXContentRegistry.Entry(BaseAggregationBuilder.class, spec.getName(), (p, c) -> { String name = (String) c; - return spec.getParser().parse(name, p); + return spec.getParser().parse(p, name); })); namedWriteables.add( new NamedWriteableRegistry.Entry(AggregationBuilder.class, spec.getName().getPreferredName(), spec.getReader())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index b6f2b2788cd25..165a50db60d18 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -19,7 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -45,6 +48,8 @@ public class GeoTileGridValuesSourceBuilder extends CompositeValuesSourceBuilder static { PARSER = new ObjectParser<>(GeoTileGridValuesSourceBuilder.TYPE); PARSER.declareInt(GeoTileGridValuesSourceBuilder::precision, new ParseField("precision")); + PARSER.declareField(((p, builder, context) -> builder.geoBoundingBox(GeoBoundingBox.parseBoundingBox(p))), + GeoBoundingBox.BOUNDS_FIELD, ObjectParser.ValueType.OBJECT); CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, ValueType.NUMERIC); } @@ -53,6 +58,7 @@ static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) } private int precision = GeoTileGridAggregationBuilder.DEFAULT_PRECISION; + private GeoBoundingBox geoBoundingBox = new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN)); GeoTileGridValuesSourceBuilder(String name) { super(name); @@ -61,6 +67,9 @@ static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.precision = in.readInt(); + if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + this.geoBoundingBox = new GeoBoundingBox(in); + } } public GeoTileGridValuesSourceBuilder precision(int precision) { @@ -68,6 +77,11 @@ public GeoTileGridValuesSourceBuilder precision(int precision) { return this; } + public GeoTileGridValuesSourceBuilder geoBoundingBox(GeoBoundingBox geoBoundingBox) { + this.geoBoundingBox = geoBoundingBox; + return this; + } + @Override public GeoTileGridValuesSourceBuilder format(String format) { throw new IllegalArgumentException("[format] is not supported for [" + TYPE + "]"); @@ -76,11 +90,17 @@ public GeoTileGridValuesSourceBuilder format(String format) { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeInt(precision); + if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + geoBoundingBox.writeTo(out); + } } @Override protected void doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field("precision", precision); + if 
(geoBoundingBox.isUnbounded() == false) { + geoBoundingBox.toXContent(builder, params); + } } @Override @@ -88,9 +108,13 @@ String type() { return TYPE; } + GeoBoundingBox geoBoundingBox() { + return geoBoundingBox; + } + @Override public int hashCode() { - return Objects.hash(super.hashCode(), precision); + return Objects.hash(super.hashCode(), precision, geoBoundingBox); } @Override @@ -99,7 +123,8 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; GeoTileGridValuesSourceBuilder other = (GeoTileGridValuesSourceBuilder) obj; - return precision == other.precision; + return Objects.equals(precision,other.precision) + && Objects.equals(geoBoundingBox, other.geoBoundingBox); } @Override @@ -112,7 +137,7 @@ protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardCon ValuesSource.GeoPoint geoPoint = (ValuesSource.GeoPoint) orig; // is specified in the builder. final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null; - CellIdSource cellIdSource = new CellIdSource(geoPoint, precision, GeoTileUtils::longEncode); + CellIdSource cellIdSource = new CellIdSource(geoPoint, precision, geoBoundingBox, GeoTileUtils::longEncode); return new CompositeValuesSourceConfig(name, fieldType, cellIdSource, DocValueFormat.GEOTILE, order(), missingBucket(), script() != null); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 503c1780d22f0..9dd179fa79702 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -134,6 +134,13 @@ public List getBuckets() { return buckets; } + /** + * The formats used when writing the keys. Package private for testing. + */ + List getFormats() { + return formats; + } + @Override public Map afterKey() { if (afterKey != null) { @@ -189,8 +196,17 @@ public InternalAggregation reduce(List aggregations, Reduce reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); } - final CompositeKey lastKey = result.size() > 0 ? result.get(result.size()-1).getRawKey() : null; - return new InternalComposite(name, size, sourceNames, formats, result, lastKey, reverseMuls, + + List reducedFormats = formats; + CompositeKey lastKey = null; + if (result.size() > 0) { + lastBucket = result.get(result.size() - 1); + /* Attach the formats from the last bucket to the reduced composite + * so that we can properly format the after key. */ + reducedFormats = lastBucket.formats; + lastKey = lastBucket.getRawKey(); + } + return new InternalComposite(name, size, sourceNames, reducedFormats, result, lastKey, reverseMuls, earlyTerminated, pipelineAggregators(), metaData); } @@ -204,7 +220,12 @@ protected InternalBucket reduceBucket(List buckets, ReduceContex aggregations.add(bucket.aggregations); } InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return new InternalBucket(sourceNames, formats, buckets.get(0).key, reverseMuls, docCount, aggs); + /* Use the formats from the bucket because they'll be right to format + * the key. The formats on the InternalComposite doing the reducing are + * just whatever formats make sense for *its* index. 
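The InternalComposite change here is about which DocValueFormat set ends up on the reduced aggregation: a shard with an unmapped field reports placeholder "raw" formats, so the reduce now takes the formats carried by the buckets themselves (and, for the after key, by the last reduced bucket) rather than whatever the reducing instance happens to hold. A deliberately simplified sketch of that selection, with formats reduced to plain strings:

import java.util.List;

class AfterKeyFormatSketch {
    // Hypothetical, stripped-down stand-in for InternalComposite.InternalBucket.
    static final class Bucket {
        final List<String> formats; // e.g. ["date_time", "raw"] from a mapped shard, ["raw", "raw"] from an unmapped one
        Bucket(List<String> formats) { this.formats = formats; }
    }

    // Prefer the formats attached to the surviving buckets; fall back to the reducing
    // instance's own formats only when no buckets are left.
    static List<String> reducedFormats(List<Bucket> reducedBuckets, List<String> ownFormats) {
        if (reducedBuckets.isEmpty()) {
            return ownFormats;
        }
        return reducedBuckets.get(reducedBuckets.size() - 1).formats;
    }
}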
This can be real + * trouble when the index doing the reducing is unmapped. */ + var reducedFormats = buckets.get(0).formats; + return new InternalBucket(sourceNames, reducedFormats, buckets.get(0).key, reverseMuls, docCount, aggs); } @Override @@ -334,6 +355,13 @@ public Aggregations getAggregations() { return aggregations; } + /** + * The formats used when writing the keys. Package private for testing. + */ + List getFormats() { + return formats; + } + @Override public int compareKey(InternalBucket other) { for (int i = 0; i < key.size(); i++) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BoundedCellValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BoundedCellValues.java new file mode 100644 index 0000000000000..52fddb34b0576 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BoundedCellValues.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; + +/** + * Class representing {@link CellValues} whose values are filtered + * according to whether they are within the specified {@link GeoBoundingBox}. + * + * The specified bounding box is assumed to be bounded. 
+ */ +class BoundedCellValues extends CellValues { + + private final GeoBoundingBox geoBoundingBox; + + protected BoundedCellValues(MultiGeoPointValues geoValues, int precision, CellIdSource.GeoPointLongEncoder encoder, + GeoBoundingBox geoBoundingBox) { + super(geoValues, precision, encoder); + this.geoBoundingBox = geoBoundingBox; + } + + + @Override + int advanceValue(org.elasticsearch.common.geo.GeoPoint target, int valuesIdx) { + if (geoBoundingBox.pointInBounds(target.getLon(), target.getLat())) { + values[valuesIdx] = encoder.encode(target.getLon(), target.getLat(), precision); + return valuesIdx + 1; + } + return valuesIdx; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java index 4ebb689c7c44f..84f963bbbd9ce 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java @@ -20,14 +20,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; +import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.search.aggregations.support.ValuesSource; -import java.io.IOException; - /** * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. @@ -36,11 +34,13 @@ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; private final int precision; private final GeoPointLongEncoder encoder; + private final GeoBoundingBox geoBoundingBox; - public CellIdSource(GeoPoint valuesSource, int precision, GeoPointLongEncoder encoder) { + public CellIdSource(GeoPoint valuesSource,int precision, GeoBoundingBox geoBoundingBox, GeoPointLongEncoder encoder) { this.valuesSource = valuesSource; //different GeoPoints could map to the same or different hashing cells. 
this.precision = precision; + this.geoBoundingBox = geoBoundingBox; this.encoder = encoder; } @@ -55,7 +55,10 @@ public boolean isFloatingPoint() { @Override public SortedNumericDocValues longValues(LeafReaderContext ctx) { - return new CellValues(valuesSource.geoPointValues(ctx), precision, encoder); + if (geoBoundingBox.isUnbounded()) { + return new UnboundedCellValues(valuesSource.geoPointValues(ctx), precision, encoder); + } + return new BoundedCellValues(valuesSource.geoPointValues(ctx), precision, encoder, geoBoundingBox); } @Override @@ -77,30 +80,4 @@ public interface GeoPointLongEncoder { long encode(double lon, double lat, int precision); } - private static class CellValues extends AbstractSortingNumericDocValues { - private MultiGeoPointValues geoValues; - private int precision; - private GeoPointLongEncoder encoder; - - protected CellValues(MultiGeoPointValues geoValues, int precision, GeoPointLongEncoder encoder) { - this.geoValues = geoValues; - this.precision = precision; - this.encoder = encoder; - } - - @Override - public boolean advanceExact(int docId) throws IOException { - if (geoValues.advanceExact(docId)) { - resize(geoValues.docValueCount()); - for (int i = 0; i < docValueCount(); ++i) { - org.elasticsearch.common.geo.GeoPoint target = geoValues.nextValue(); - values[i] = encoder.encode(target.getLon(), target.getLat(), precision); - } - sort(); - return true; - } else { - return false; - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellValues.java new file mode 100644 index 0000000000000..5d428373ccd8f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellValues.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; + +import java.io.IOException; + +/** + * Class representing the long-encoded grid-cells belonging to + * the geo-doc-values. Class must encode the values and then + * sort them in order to account for the cells correctly. 
+ */ +abstract class CellValues extends AbstractSortingNumericDocValues { + private MultiGeoPointValues geoValues; + protected int precision; + protected CellIdSource.GeoPointLongEncoder encoder; + + protected CellValues(MultiGeoPointValues geoValues, int precision, CellIdSource.GeoPointLongEncoder encoder) { + this.geoValues = geoValues; + this.precision = precision; + this.encoder = encoder; + } + + @Override + public boolean advanceExact(int docId) throws IOException { + if (geoValues.advanceExact(docId)) { + int docValueCount = geoValues.docValueCount(); + resize(docValueCount); + int j = 0; + for (int i = 0; i < docValueCount; i++) { + j = advanceValue(geoValues.nextValue(), j); + } + resize(j); + sort(); + return true; + } else { + return false; + } + } + + /** + * Sets the appropriate long-encoded value for target + * in values. + * + * @param target the geo-value to encode + * @param valuesIdx the index into values to set + * @return valuesIdx + 1 if value was set, valuesIdx otherwise. + */ + abstract int advanceValue(org.elasticsearch.common.geo.GeoPoint target, int valuesIdx); +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index e1c4221139202..0a7c918231c73 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -20,7 +20,10 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -53,6 +56,8 @@ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationB protected int precision; protected int requiredSize; protected int shardSize; + private GeoBoundingBox geoBoundingBox = new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN)); + @FunctionalInterface protected interface PrecisionParser { @@ -66,6 +71,10 @@ public static ObjectParser createParser(String org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); parser.declareInt(GeoGridAggregationBuilder::size, FIELD_SIZE); parser.declareInt(GeoGridAggregationBuilder::shardSize, FIELD_SHARD_SIZE); + parser.declareField((p, builder, context) -> { + builder.setGeoBoundingBox(GeoBoundingBox.parseBoundingBox(p)); + }, + GeoBoundingBox.BOUNDS_FIELD, org.elasticsearch.common.xcontent.ObjectParser.ValueType.OBJECT); return parser; } @@ -78,7 +87,7 @@ protected GeoGridAggregationBuilder(GeoGridAggregationBuilder clone, Builder fac this.precision = clone.precision; this.requiredSize = clone.requiredSize; this.shardSize = clone.shardSize; - + this.geoBoundingBox = clone.geoBoundingBox; } /** @@ -89,6 +98,9 @@ public GeoGridAggregationBuilder(StreamInput in) throws IOException { precision = in.readVInt(); requiredSize = in.readVInt(); shardSize = in.readVInt(); + if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + geoBoundingBox = new GeoBoundingBox(in); + } } @Override @@ -96,6 +108,9 @@ protected void innerWriteTo(StreamOutput 
out) throws IOException { out.writeVInt(precision); out.writeVInt(requiredSize); out.writeVInt(shardSize); + if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + geoBoundingBox.writeTo(out); + } } /** @@ -110,7 +125,8 @@ protected void innerWriteTo(StreamOutput out) throws IOException { */ protected abstract ValuesSourceAggregatorFactory createFactory( String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder, Map metaData + GeoBoundingBox geoBoundingBox, QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subFactoriesBuilder, Map metaData ) throws IOException; public int precision() { @@ -143,6 +159,16 @@ public int shardSize() { return shardSize; } + public GeoGridAggregationBuilder setGeoBoundingBox(GeoBoundingBox geoBoundingBox) { + this.geoBoundingBox = geoBoundingBox; + // no validation done here, similar to geo_bounding_box query behavior. + return this; + } + + public GeoBoundingBox geoBoundingBox() { + return geoBoundingBox; + } + @Override protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, @@ -166,7 +192,7 @@ protected ValuesSourceAggregatorFactory innerBuild(QueryS if (shardSize < requiredSize) { shardSize = requiredSize; } - return createFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, + return createFactory(name, config, precision, requiredSize, shardSize, geoBoundingBox, queryShardContext, parent, subFactoriesBuilder, metaData); } @@ -177,6 +203,9 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) if (shardSize > -1) { builder.field(FIELD_SHARD_SIZE.getPreferredName(), shardSize); } + if (geoBoundingBox.isUnbounded() == false) { + geoBoundingBox.toXContent(builder, params); + } return builder; } @@ -188,11 +217,12 @@ public boolean equals(Object obj) { GeoGridAggregationBuilder other = (GeoGridAggregationBuilder) obj; return precision == other.precision && requiredSize == other.requiredSize - && shardSize == other.shardSize; + && shardSize == other.shardSize + && Objects.equals(geoBoundingBox, other.geoBoundingBox); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), precision, requiredSize, shardSize); + return Objects.hash(super.hashCode(), precision, requiredSize, shardSize, geoBoundingBox); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java index c91f763b603c0..c77578f2570c7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -48,8 +48,8 @@ public abstract class GeoGridAggregator extends Bucke protected final LongHash bucketOrds; GeoGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, - int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { + int requiredSize, int shardSize, SearchContext aggregationContext, + Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.valuesSource = valuesSource; this.requiredSize = 
requiredSize; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index d58beeb781c25..acc9cde113164 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; +import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -60,11 +61,12 @@ public GeoGridAggregationBuilder precision(int precision) { @Override protected ValuesSourceAggregatorFactory createFactory( - String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, - subFactoriesBuilder, metaData); + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + GeoBoundingBox geoBoundingBox, QueryShardContext queryShardContext, + AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, geoBoundingBox, + queryShardContext, parent, subFactoriesBuilder, metaData); } private GeoHashGridAggregationBuilder(GeoHashGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 54d1e2e940649..1ad59ccc45100 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -34,9 +34,11 @@ public class GeoHashGridAggregator extends GeoGridAggregator { GeoHashGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, - int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, pipelineAggregators, metaData); + int requiredSize, int shardSize, SearchContext aggregationContext, + Aggregator parent, List pipelineAggregators, + Map metaData) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, + pipelineAggregators, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index a049a07f13dbe..2d7087a693bfa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; +import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -28,7 +29,6 @@ import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.GeoPoint; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; @@ -43,14 +43,17 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory< private final int precision; private final int requiredSize; private final int shardSize; + private final GeoBoundingBox geoBoundingBox; - GeoHashGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, - int shardSize, QueryShardContext queryShardContext, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + GeoHashGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, + int shardSize, GeoBoundingBox geoBoundingBox, QueryShardContext queryShardContext, + AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.precision = precision; this.requiredSize = requiredSize; this.shardSize = shardSize; + this.geoBoundingBox = geoBoundingBox; } @Override @@ -69,7 +72,7 @@ public InternalAggregation buildEmptyAggregation() { } @Override - protected Aggregator doCreateInternal(final GeoPoint valuesSource, + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, @@ -78,8 +81,8 @@ protected Aggregator doCreateInternal(final GeoPoint valuesSource, if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, searchContext, parent); } - CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, Geohash::longEncode); - return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, searchContext, parent, - pipelineAggregators, metaData); + CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, geoBoundingBox, Geohash::longEncode); + return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, + searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index b3d9888781362..595c6cab6e718 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; 
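At the request-building level, the new bounds support surfaces as setGeoBoundingBox on GeoGridAggregationBuilder (and geoBoundingBox on the composite geotile source). A hedged usage sketch for the geotile case, with a made-up field name and arbitrary coordinates (GeoPoint takes latitude then longitude, and the box is top-left / bottom-right as in the geo_bounding_box query):

import org.elasticsearch.common.geo.GeoBoundingBox;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;

class BoundedGeoTileGridExample {
    static GeoGridAggregationBuilder viewportTiles() {
        return new GeoTileGridAggregationBuilder("viewport_tiles")
            .field("location")                    // hypothetical geo_point field
            .precision(7)
            .setGeoBoundingBox(new GeoBoundingBox(
                new GeoPoint(52.6, 13.2),         // top-left
                new GeoPoint(52.3, 13.6)));       // bottom-right
    }
}

As the diff notes, no validation is performed when the box is set, mirroring geo_bounding_box query behaviour; leaving it unset keeps the previous unfiltered behaviour.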
+import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; @@ -59,12 +60,11 @@ public GeoGridAggregationBuilder precision(int precision) { @Override protected ValuesSourceAggregatorFactory createFactory( - String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData - ) throws IOException { - return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, - subFactoriesBuilder, metaData); + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + GeoBoundingBox geoBoundingBox, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData ) throws IOException { + return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, geoBoundingBox, + queryShardContext, parent, subFactoriesBuilder, metaData); } private GeoTileGridAggregationBuilder(GeoTileGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index d2ff5ed82513c..350761aa84050 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -35,9 +35,11 @@ public class GeoTileGridAggregator extends GeoGridAggregator { GeoTileGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, - int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, pipelineAggregators, metaData); + int requiredSize, int shardSize, SearchContext aggregationContext, + Aggregator parent, List pipelineAggregators, + Map metaData) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, + pipelineAggregators, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index 8380a4172c9c5..0f59c9a71ea40 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; +import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -27,7 +28,6 @@ import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import 
org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.GeoPoint; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; @@ -42,14 +42,17 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory< private final int precision; private final int requiredSize; private final int shardSize; + private final GeoBoundingBox geoBoundingBox; - GeoTileGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, - int shardSize, QueryShardContext queryShardContext, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + GeoTileGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, + int shardSize, GeoBoundingBox geoBoundingBox, QueryShardContext queryShardContext, + AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.precision = precision; this.requiredSize = requiredSize; this.shardSize = shardSize; + this.geoBoundingBox = geoBoundingBox; } @Override @@ -68,7 +71,7 @@ public InternalAggregation buildEmptyAggregation() { } @Override - protected Aggregator doCreateInternal(final GeoPoint valuesSource, + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, @@ -77,8 +80,8 @@ protected Aggregator doCreateInternal(final GeoPoint valuesSource, if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, searchContext, parent); } - CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, GeoTileUtils::longEncode); - return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, searchContext, parent, - pipelineAggregators, metaData); + CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, geoBoundingBox, GeoTileUtils::longEncode); + return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, + searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java index 19381e87fedb6..03f821296f2a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -51,7 +51,7 @@ private GeoTileUtils() {} * Another consideration is that index optimizes lat/lng storage, loosing some precision. * E.g. hash lng=140.74779717298918D lat=45.61884022447444D == "18/233561/93659", but shown as "18/233561/93658" */ - static final int MAX_ZOOM = 29; + public static final int MAX_ZOOM = 29; /** * Bit position of the zoom value within hash - zoom is stored in the most significant 6 bits of a long number. 
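The BoundedCellValues added earlier (and the UnboundedCellValues that follows) split the old CellValues logic so that, when a box is configured, points outside it are simply dropped from a document's set of cell values before sorting. A JDK-only sketch of that per-document filtering, with a plain rectangle check standing in for GeoBoundingBox.pointInBounds (which additionally handles boxes crossing the dateline):

import java.util.Arrays;

class BoundedEncodeSketch {
    // Hypothetical encoder standing in for GeoTileUtils::longEncode or Geohash::longEncode.
    interface Encoder { long encode(double lon, double lat, int precision); }

    static long[] encodeInBounds(double[][] lonLatPoints, int precision, Encoder encoder,
                                 double left, double right, double bottom, double top) {
        long[] values = new long[lonLatPoints.length];
        int count = 0;
        for (double[] point : lonLatPoints) {
            double lon = point[0];
            double lat = point[1];
            if (lon >= left && lon <= right && lat >= bottom && lat <= top) {
                values[count++] = encoder.encode(lon, lat, precision); // kept: advanceValue returns valuesIdx + 1
            }                                                          // dropped points leave valuesIdx untouched
        }
        long[] kept = Arrays.copyOf(values, count);                    // corresponds to resize(j) in CellValues.advanceExact
        Arrays.sort(kept);                                             // AbstractSortingNumericDocValues sorts the result
        return kept;
    }
}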
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java new file mode 100644 index 0000000000000..e64061eae5ae5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; + +/** + * Class representing {@link CellValues} that are unbounded by any + * {@link GeoBoundingBox}. + */ +class UnboundedCellValues extends CellValues { + + UnboundedCellValues(MultiGeoPointValues geoValues, int precision, CellIdSource.GeoPointLongEncoder encoder) { + super(geoValues, precision, encoder); + } + + @Override + int advanceValue(org.elasticsearch.common.geo.GeoPoint target, int valuesIdx) { + values[valuesIdx] = encoder.encode(target.getLon(), target.getLat(), precision); + return valuesIdx + 1; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 0c66e17add645..861e3269ace6c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -494,13 +494,13 @@ protected ValuesSourceAggregatorFactory innerBuild(QueryShardConte Builder subFactoriesBuilder) throws IOException { final ZoneId tz = timeZone(); // TODO use offset here rather than explicitly in the aggregation - final Rounding rounding = dateHistogramInterval.createRounding(tz, 0); + final Rounding rounding = dateHistogramInterval.createRounding(tz, offset); final ZoneId rewrittenTimeZone = rewriteTimeZone(queryShardContext); final Rounding shardRounding; if (tz == rewrittenTimeZone) { shardRounding = rounding; } else { - shardRounding = dateHistogramInterval.createRounding(rewrittenTimeZone, 0); + shardRounding = dateHistogramInterval.createRounding(rewrittenTimeZone, offset); } ExtendedBounds roundedBounds = null; @@ -508,7 +508,7 @@ protected ValuesSourceAggregatorFactory innerBuild(QueryShardConte // parse any string bounds to longs and round roundedBounds = this.extendedBounds.parseAndValidate(name, queryShardContext, config.format()).round(rounding); } - return new DateHistogramAggregatorFactory(name, config, offset, order, keyed, minDocCount, + return new 
DateHistogramAggregatorFactory(name, config, order, keyed, minDocCount, rounding, shardRounding, roundedBounds, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 0c7a91505ae88..8479ea066aa50 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -64,10 +64,9 @@ class DateHistogramAggregator extends BucketsAggregator { private final ExtendedBounds extendedBounds; private final LongHash bucketOrds; - private long offset; DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding, - long offset, BucketOrder order, boolean keyed, + BucketOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -75,7 +74,6 @@ class DateHistogramAggregator extends BucketsAggregator { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; this.shardRounding = shardRounding; - this.offset = offset; this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; @@ -113,7 +111,7 @@ public void collect(int doc, long bucket) throws IOException { long value = values.nextValue(); // We can use shardRounding here, which is sometimes more efficient // if daylight saving times are involved. - long rounded = shardRounding.round(value - offset) + offset; + long rounded = shardRounding.round(value); assert rounded >= previousRounded; if (rounded == previousRounded) { continue; @@ -150,7 +148,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, buckets, order, minDocCount, rounding.offset(), emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } @@ -159,8 +157,8 @@ public InternalAggregation buildEmptyAggregation() { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? 
new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed, - pipelineAggregators(), metaData()); + return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, rounding.offset(), emptyBucketInfo, formatter, + keyed, pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 86555767e25ea..d68cf814f32bc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -39,7 +39,6 @@ public final class DateHistogramAggregatorFactory extends ValuesSourceAggregatorFactory { - private final long offset; private final BucketOrder order; private final boolean keyed; private final long minDocCount; @@ -48,12 +47,11 @@ public final class DateHistogramAggregatorFactory private final Rounding shardRounding; public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, - long offset, BucketOrder order, boolean keyed, long minDocCount, + BucketOrder order, boolean keyed, long minDocCount, Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); - this.offset = offset; this.order = order; this.keyed = keyed; this.minDocCount = minDocCount; @@ -104,7 +102,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, private Aggregator createAggregator(ValuesSource.Numeric valuesSource, SearchContext searchContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new DateHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds, + return new DateHistogramAggregator(name, factories, rounding, shardRounding, order, keyed, minDocCount, extendedBounds, valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } @@ -113,7 +111,7 @@ private Aggregator createRangeAggregator(ValuesSource.Range valuesSource, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new DateRangeHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds, + return new DateRangeHistogramAggregator(name, factories, rounding, shardRounding, order, keyed, minDocCount, extendedBounds, valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java index 9eed2a542f9dd..9c6417165ab30 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java @@ 
-67,10 +67,9 @@ class DateRangeHistogramAggregator extends BucketsAggregator { private final ExtendedBounds extendedBounds; private final LongHash bucketOrds; - private long offset; DateRangeHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding, - long offset, BucketOrder order, boolean keyed, + BucketOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Range valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent, List pipelineAggregators, @@ -79,7 +78,6 @@ class DateRangeHistogramAggregator extends BucketsAggregator { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; this.shardRounding = shardRounding; - this.offset = offset; this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; @@ -126,8 +124,8 @@ public void collect(int doc, long bucket) throws IOException { // The encoding should ensure that this assert is always true. assert from >= previousFrom : "Start of range not >= previous start"; final Long to = (Long) range.getTo(); - final long startKey = offsetAwareRounding(shardRounding, from, offset); - final long endKey = offsetAwareRounding(shardRounding, to, offset); + final long startKey = shardRounding.round(from); + final long endKey = shardRounding.round(to); for (long key = startKey > previousKey ? startKey : previousKey; key <= endKey; key = shardRounding.nextRoundingValue(key)) { if (key == previousKey) { @@ -153,10 +151,6 @@ public void collect(int doc, long bucket) throws IOException { }; } - private long offsetAwareRounding(Rounding rounding, long value, long offset) { - return rounding.round(value - offset) + offset; - } - @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0; @@ -175,7 +169,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, buckets, order, minDocCount, rounding.offset(), emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } @@ -184,8 +178,8 @@ public InternalAggregation buildEmptyAggregation() { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? 
new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed, - pipelineAggregators(), metaData()); + return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, rounding.offset(), emptyBucketInfo, formatter, + keyed, pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index dc20ff291e0d1..4a9deb9bdedfc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -166,7 +166,11 @@ ExtendedBounds parseAndValidate(String aggName, QueryShardContext queryShardCont } ExtendedBounds round(Rounding rounding) { - return new ExtendedBounds(min != null ? rounding.round(min) : null, max != null ? rounding.round(max) : null); + // Extended bounds shouldn't be affected by the offset + Rounding effectiveRounding = rounding.withoutOffset(); + return new ExtendedBounds( + min != null ? effectiveRounding.round(min) : null, + max != null ? effectiveRounding.round(max) : null); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 1f1775ede7535..9db524bd3d6ba 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -497,7 +497,7 @@ public Number getKey(MultiBucketsAggregation.Bucket bucket) { @Override public Number nextKey(Number key) { - return emptyBucketInfo.rounding.nextRoundingValue(key.longValue() - offset) + offset; + return emptyBucketInfo.rounding.nextRoundingValue(key.longValue()); } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 9340e1508999c..45967235e8086 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -365,9 +365,12 @@ public boolean hasOldVersionSnapshots(String repositoryName, RepositoryData repo } else { try { final Repository repository = repositoriesService.repository(repositoryName); - hasOldFormatSnapshots = snapshotIds.stream().map(repository::getSnapshotInfo).anyMatch( - snapshotInfo -> (excluded == null || snapshotInfo.snapshotId().equals(excluded) == false) - && snapshotInfo.version().before(SHARD_GEN_IN_REPO_DATA_VERSION)); + hasOldFormatSnapshots = snapshotIds.stream().filter(snapshotId -> snapshotId.equals(excluded) == false).anyMatch( + snapshotId -> { + final Version known = repositoryData.getVersion(snapshotId); + return (known == null ?
repository.getSnapshotInfo(snapshotId).version() : known) + .before(SHARD_GEN_IN_REPO_DATA_VERSION); + }); } catch (SnapshotMissingException e) { logger.warn("Failed to load snapshot metadata, assuming repository is in old format", e); return true; diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index 14ca217ade1b8..cd3983781d0f8 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -39,16 +39,13 @@ import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import java.util.stream.Stream; -import static org.elasticsearch.common.settings.Setting.boolSetting; import static org.elasticsearch.common.settings.Setting.intSetting; public class ProxyConnectionStrategy extends RemoteConnectionStrategy { @@ -75,12 +72,12 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { Setting.Property.Dynamic, Setting.Property.NodeScope)); /** - * Whether to include the hostname as a server_name attribute + * A configurable server_name attribute */ - public static final Setting.AffixSetting INCLUDE_SERVER_NAME = Setting.affixKeySetting( + public static final Setting.AffixSetting SERVER_NAME = Setting.affixKeySetting( "cluster.remote.", - "include_server_name", - (ns, key) -> boolSetting(key, false, new StrategyValidator<>(ns, key, ConnectionStrategy.PROXY), + "server_name", + (ns, key) -> Setting.simpleString(key, new StrategyValidator<>(ns, key, ConnectionStrategy.PROXY), Setting.Property.Dynamic, Setting.Property.NodeScope)); static final int CHANNELS_PER_CONNECTION = 1; @@ -89,12 +86,10 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { private static final Logger logger = LogManager.getLogger(ProxyConnectionStrategy.class); private final int maxNumConnections; - private final AtomicLong counter = new AtomicLong(0); private final String configuredAddress; - private final boolean includeServerName; + private final String configuredServerName; private final Supplier address; private final AtomicReference remoteClusterName = new AtomicReference<>(); - private final ConnectionProfile profile; private final ConnectionManager.ConnectionValidator clusterNameValidator; ProxyConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, @@ -105,35 +100,30 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { connectionManager, REMOTE_SOCKET_CONNECTIONS.getConcreteSettingForNamespace(clusterAlias).get(settings), REMOTE_CLUSTER_ADDRESSES.getConcreteSettingForNamespace(clusterAlias).get(settings), - INCLUDE_SERVER_NAME.getConcreteSettingForNamespace(clusterAlias).get(settings)); + SERVER_NAME.getConcreteSettingForNamespace(clusterAlias).get(settings)); } ProxyConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, int maxNumConnections, String configuredAddress) { this(clusterAlias, transportService, connectionManager, maxNumConnections, configuredAddress, - () -> resolveAddress(configuredAddress), false); + () -> 
resolveAddress(configuredAddress), null); } ProxyConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, - int maxNumConnections, String configuredAddress, boolean includeServerName) { + int maxNumConnections, String configuredAddress, String configuredServerName) { this(clusterAlias, transportService, connectionManager, maxNumConnections, configuredAddress, - () -> resolveAddress(configuredAddress), includeServerName); + () -> resolveAddress(configuredAddress), configuredServerName); } ProxyConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, int maxNumConnections, String configuredAddress, Supplier address, - boolean includeServerName) { + String configuredServerName) { super(clusterAlias, transportService, connectionManager); this.maxNumConnections = maxNumConnections; this.configuredAddress = configuredAddress; - this.includeServerName = includeServerName; + this.configuredServerName = configuredServerName; assert Strings.isEmpty(configuredAddress) == false : "Cannot use proxy connection strategy with no configured addresses"; this.address = address; - // TODO: Move into the ConnectionManager - this.profile = new ConnectionProfile.Builder() - .addConnections(1, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) - .addConnections(0, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY) - .build(); this.clusterNameValidator = (newConnection, actualProfile, listener) -> transportService.handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { @@ -223,15 +213,15 @@ public void onFailure(Exception e) { for (int i = 0; i < remaining; ++i) { String id = clusterAlias + "#" + resolved; Map attributes; - if (includeServerName) { - attributes = Collections.singletonMap("server_name", resolved.address().getHostString()); - } else { + if (Strings.isNullOrEmpty(configuredServerName)) { attributes = Collections.emptyMap(); + } else { + attributes = Collections.singletonMap("server_name", configuredServerName); } DiscoveryNode node = new DiscoveryNode(id, resolved, attributes, DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT.minimumCompatibilityVersion()); - connectionManager.connectToNode(node, profile, clusterNameValidator, new ActionListener<>() { + connectionManager.connectToNode(node, null, clusterNameValidator, new ActionListener<>() { @Override public void onResponse(Void v) { compositeListener.onResponse(v); @@ -258,12 +248,6 @@ public void onFailure(Exception e) { } } - private TransportAddress nextAddress(List resolvedAddresses) { - long curr; - while ((curr = counter.getAndIncrement()) == Long.MIN_VALUE) ; - return resolvedAddresses.get(Math.floorMod(curr, resolvedAddresses.size())); - } - private static TransportAddress resolveAddress(String address) { return new TransportAddress(parseConfiguredAddress(address)); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index ba4f98d0d09a3..08c819c70a47c 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -111,7 +111,7 @@ public void listenForUpdates(ClusterSettings clusterSettings) { SniffConnectionStrategy.REMOTE_NODE_CONNECTIONS, 
ProxyConnectionStrategy.REMOTE_CLUSTER_ADDRESSES, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, - ProxyConnectionStrategy.INCLUDE_SERVER_NAME); + ProxyConnectionStrategy.SERVER_NAME); clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::validateAndUpdateRemoteCluster); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index ae697bba95b49..c87872317de8a 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -66,18 +66,13 @@ final class RemoteClusterConnection implements Closeable { * @param transportService the local nodes transport service */ RemoteClusterConnection(Settings settings, String clusterAlias, TransportService transportService) { - this(settings, clusterAlias, transportService, - createConnectionManager(RemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings), transportService)); - } - - RemoteClusterConnection(Settings settings, String clusterAlias, TransportService transportService, - ConnectionManager connectionManager) { this.transportService = transportService; this.clusterAlias = clusterAlias; - this.remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings); + this.remoteConnectionManager = new RemoteConnectionManager(clusterAlias, createConnectionManager(profile, transportService)); this.connectionStrategy = RemoteConnectionStrategy.buildStrategy(clusterAlias, transportService, remoteConnectionManager, settings); // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. 
- connectionManager.addListener(transportService); + this.remoteConnectionManager.getConnectionManager().addListener(transportService); this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE .getConcreteSettingForNamespace(clusterAlias).get(settings); this.threadPool = transportService.threadPool; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java index 1ee8fba2bd7eb..dff596f7c1112 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -170,11 +169,9 @@ private static Stream getClusterAlias(Settings settings, Setting.Aff return allConcreteSettings.map(affixSetting::getNamespace); } - static InetSocketAddress parseConfiguredAddress(String remoteHost) { - final Tuple hostPort = parseHostPort(remoteHost); - final String host = hostPort.v1(); - assert hostPort.v2() != null : remoteHost; - final int port = hostPort.v2(); + static InetSocketAddress parseConfiguredAddress(String configuredAddress) { + final String host = parseHost(configuredAddress); + final int port = parsePort(configuredAddress); InetAddress hostAddress; try { hostAddress = InetAddress.getByName(host); @@ -184,10 +181,8 @@ static InetSocketAddress parseConfiguredAddress(String remoteHost) { return new InetSocketAddress(hostAddress, port); } - private static Tuple parseHostPort(final String remoteHost) { - final String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost)); - final int port = parsePort(remoteHost); - return Tuple.tuple(host, port); + static String parseHost(final String configuredAddress) { + return configuredAddress.substring(0, indexOfPortSeparator(configuredAddress)); } static int parsePort(String remoteHost) { diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index fcc4bde951dd0..8ce03e9997527 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -412,7 +412,7 @@ private static DiscoveryNode resolveSeedNode(String clusterAlias, String address Version.CURRENT.minimumCompatibilityVersion()); } else { TransportAddress transportAddress = new TransportAddress(parseConfiguredAddress(proxyAddress)); - String hostName = address.substring(0, indexOfPortSeparator(address)); + String hostName = RemoteConnectionStrategy.parseHost(proxyAddress); return new DiscoveryNode("", clusterAlias + "#" + address, UUIDs.randomBase64UUID(), hostName, address, transportAddress, Collections.singletonMap("server_name", hostName), DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT.minimumCompatibilityVersion()); @@ -429,14 +429,6 @@ static Predicate getNodePredicate(Settings settings) { return DEFAULT_NODE_PREDICATE; } - private static int indexOfPortSeparator(String remoteHost) { - int portSeparator = remoteHost.lastIndexOf(':'); // in case we 
have a IPv6 address ie. [::1]:9300 - if (portSeparator == -1 || portSeparator == remoteHost.length()) { - throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); - } - return portSeparator; - } - private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { if (proxyAddress == null || proxyAddress.isEmpty()) { return node; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java index 92eb641a6b1a9..efbf6d6018d2c 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java @@ -231,7 +231,8 @@ private HandshakeResponse(Version requestVersion, StreamInput in) throws IOExcep // During the handshake process, nodes set their stream version to the minimum compatibility // version they support. When deserializing the response, we use the version the other node // told us that it actually is in the handshake response (`version`). - if (requestVersion.onOrAfter(Version.V_7_6_0) && version.onOrAfter(Version.V_7_6_0)) { + // TODO: On backport update to 6.7 + if (requestVersion.onOrAfter(Version.V_8_0_0) && version.onOrAfter(Version.V_8_0_0)) { clusterName = new ClusterName(in); discoveryNode = new DiscoveryNode(in); } else { @@ -248,7 +249,8 @@ public void writeTo(StreamOutput out) throws IOException { // version they support. When deciding what response to send, we use the version the other node // told us that it actually is in the handshake request (`requestVersion`). If it did not tell // us a `requestVersion`, it is at least a pre-7.6 node. - if (requestVersion != null && requestVersion.onOrAfter(Version.V_7_6_0) && version.onOrAfter(Version.V_7_6_0)) { + // TODO: On backport update to 6.7 + if (requestVersion != null && requestVersion.onOrAfter(Version.V_8_0_0) && version.onOrAfter(Version.V_8_0_0)) { clusterName.writeTo(out); discoveryNode.writeTo(out); } diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.ErrorOnUnknown b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.ErrorOnUnknown new file mode 100644 index 0000000000000..38f08271bce10 --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.ErrorOnUnknown @@ -0,0 +1 @@ +org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java index 06bd3dc26d8f6..5a581d2d4088e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -56,7 +56,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws XContentParseException iae = expectThrows(XContentParseException.class, () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated))); assertThat(iae.getMessage(), - containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "], parser not found")); + containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "]")); } else 
{ try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index 23129ae546fe6..7b344bf335c88 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; @@ -108,6 +109,25 @@ public TokenStream create(TokenStream tokenStream) { } } + class DeprecatedTokenFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory { + + DeprecatedTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + deprecationLogger.deprecated("Using deprecated token filter [deprecated]"); + return tokenStream; + } + + @Override + public TokenStream normalize(TokenStream tokenStream) { + deprecationLogger.deprecated("Using deprecated token filter [deprecated]"); + return tokenStream; + } + } + class AppendCharFilterFactory extends AbstractCharFilterFactory { final String suffix; @@ -136,7 +156,7 @@ public Map> getTokenizers() { @Override public Map> getTokenFilters() { - return singletonMap("mock", MockFactory::new); + return Map.of("mock", MockFactory::new, "deprecated", DeprecatedTokenFilterFactory::new); } @Override @@ -492,4 +512,28 @@ public void testExceedSetMaxTokenLimit() { assertEquals(e.getMessage(), "The number of tokens produced by calling _analyze has exceeded the allowed maximum of [" + idxMaxTokenCount + "]." 
+ " This limit can be set by changing the [index.analyze.max_token_count] index level setting."); } + + public void testDeprecationWarnings() throws IOException { + AnalyzeAction.Request req = new AnalyzeAction.Request(); + req.tokenizer("standard"); + req.addTokenFilter("lowercase"); + req.addTokenFilter("deprecated"); + req.text("test text"); + + AnalyzeAction.Response analyze = + TransportAnalyzeAction.analyze(req, registry, mockIndexService(), maxTokenCount); + assertEquals(2, analyze.getTokens().size()); + assertWarnings("Using deprecated token filter [deprecated]"); + + // normalizer + req = new AnalyzeAction.Request(); + req.addTokenFilter("lowercase"); + req.addTokenFilter("deprecated"); + req.text("text"); + + analyze = + TransportAnalyzeAction.analyze(req, registry, mockIndexService(), maxTokenCount); + assertEquals(1, analyze.getTokens().size()); + assertWarnings("Using deprecated token filter [deprecated]"); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java deleted file mode 100644 index 5fc8ce5fe3cfd..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.XContentTestUtils.convertToMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -public class SyncedFlushUnitTests extends ESTestCase { - - private static class TestPlan { - public SyncedFlushResponse.ShardCounts totalCounts; - public Map countsPerIndex = new HashMap<>(); - public ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - public SyncedFlushResponse result; - } - - public void testIndicesSyncedFlushResult() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); - Map asMap = convertToMap(testPlan.result); - assertShardCount("_shards header", (Map) asMap.get("_shards"), testPlan.totalCounts); - - assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header - for (String index : testPlan.countsPerIndex.keySet()) { - Map indexMap = (Map) asMap.get(index); - assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index)); - List> failureList = (List>) indexMap.get("failures"); - final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index); - if (expectedFailures == 0) { - assertNull(index + " has unexpected failures", failureList); - } else { - assertNotNull(index + " should have failures", failureList); - assertThat(failureList, hasSize(expectedFailures)); - } - } - } - - public void testResponseStreaming() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); - BytesStreamOutput out = new BytesStreamOutput(); - testPlan.result.writeTo(out); - StreamInput in = out.bytes().streamInput(); - SyncedFlushResponse readResponse = new SyncedFlushResponse(in); - assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); - assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); - for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { - List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); - assertNotNull(originalShardsResults); - List readShardsResults = entry.getValue(); - assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); - for (int i = 0; i < readShardsResults.size(); i++) { - ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); - ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); - assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); - assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); - assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); - assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); - assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId())); - assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); - assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); - for (Map.Entry shardEntry - : originalShardResult.failedShards().entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); - for (Map.Entry shardEntry - : originalShardResult.shardResponses().entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses() - .get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - } - } - } - - private void assertShardCount(String name, Map header, ShardCounts expectedCounts) { - assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); - assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); - assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed)); - } - - protected TestPlan createTestPlan() 
{ - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccesful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, - null, copy == 0, ShardRoutingState.STARTED); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccesful += successful; - } - testPlan.result = new SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); - return testPlan; - } - -} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java index 7276ef2ebada4..834aa57b2d825 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java @@ -91,23 +91,17 @@ public void testExecuteVerboseItem() throws Exception { assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class)); SimulateDocumentVerboseResult simulateDocumentVerboseResult = (SimulateDocumentVerboseResult) actualItemResponse; assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(2)); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("test-id")); - IngestDocument firstProcessorIngestDocument = simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(); - assertThat(firstProcessorIngestDocument, not(sameInstance(this.ingestDocument))); - assertIngestDocument(firstProcessorIngestDocument, this.ingestDocument); - assertThat(firstProcessorIngestDocument.getSourceAndMetadata(), not(sameInstance(this.ingestDocument.getSourceAndMetadata()))); + assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("test-id")); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(0), pipeline.getId(), ingestDocument); 
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), nullValue()); + assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getProcessorTag(), equalTo("test-id")); - IngestDocument secondProcessorIngestDocument = simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(); - assertThat(secondProcessorIngestDocument, not(sameInstance(this.ingestDocument))); - assertIngestDocument(secondProcessorIngestDocument, this.ingestDocument); - assertThat(secondProcessorIngestDocument.getSourceAndMetadata(), not(sameInstance(this.ingestDocument.getSourceAndMetadata()))); - assertThat(secondProcessorIngestDocument.getSourceAndMetadata(), - not(sameInstance(firstProcessorIngestDocument.getSourceAndMetadata()))); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(1), pipeline.getId(), ingestDocument); + assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument().getSourceAndMetadata(), + not(sameInstance(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument().getSourceAndMetadata()))); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure(), nullValue()); } - public void testExecuteItem() throws Exception { TestProcessor processor = new TestProcessor("processor_0", "mock", ingestDocument -> {}); Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor, processor)); @@ -147,10 +141,7 @@ public void testExecuteVerboseItemExceptionWithoutOnFailure() throws Exception { assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(2)); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("processor_0")); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), nullValue()); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), not(sameInstance(ingestDocument))); - assertIngestDocument(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), ingestDocument); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument().getSourceAndMetadata(), - not(sameInstance(ingestDocument.getSourceAndMetadata()))); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(0), pipeline.getId(), ingestDocument); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getProcessorTag(), equalTo("processor_1")); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), nullValue()); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure(), instanceOf(RuntimeException.class)); @@ -191,14 +182,12 @@ public void testExecuteVerboseItemWithOnFailure() throws Exception { metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD, "mock"); metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD, "processor_0"); metadata.put(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD, "processor failed"); - assertIngestDocument(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), - ingestDocumentWithOnFailureMetadata); - + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(1), pipeline.getId(), + ingestDocumentWithOnFailureMetadata); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure(), nullValue()); 
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getProcessorTag(), equalTo("processor_2")); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getIngestDocument(), not(sameInstance(ingestDocument))); - assertIngestDocument(simulateDocumentVerboseResult.getProcessorResults().get(2).getIngestDocument(), ingestDocument); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(2), pipeline.getId(), ingestDocument); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getFailure(), nullValue()); } @@ -221,10 +210,7 @@ public void testExecuteVerboseItemExceptionWithIgnoreFailure() throws Exception assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(1)); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("processor_0")); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), sameInstance(exception)); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), not(sameInstance(ingestDocument))); - assertIngestDocument(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), ingestDocument); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument().getSourceAndMetadata(), - not(sameInstance(ingestDocument.getSourceAndMetadata()))); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(0), pipeline.getId(), ingestDocument); } public void testExecuteVerboseItemWithoutExceptionAndWithIgnoreFailure() throws Exception { @@ -245,10 +231,7 @@ public void testExecuteVerboseItemWithoutExceptionAndWithIgnoreFailure() throws assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(1)); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("processor_0")); assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), nullValue()); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), not(sameInstance(ingestDocument))); - assertIngestDocument(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), ingestDocument); - assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument().getSourceAndMetadata(), - not(sameInstance(ingestDocument.getSourceAndMetadata()))); + assertVerboseResult(simulateDocumentVerboseResult.getProcessorResults().get(0), pipeline.getId(), ingestDocument); } public void testExecuteItemWithFailure() throws Exception { @@ -392,4 +375,19 @@ public String getType() { } } + private static void assertVerboseResult(SimulateProcessorResult result, + String expectedPipelineId, + IngestDocument expectedIngestDocument) { + IngestDocument simulateVerboseIngestDocument = result.getIngestDocument(); + // Remove and compare pipeline key. It is always in the verbose result, + // since that is a snapshot of how the ingest doc looks during pipeline execution, but not in the final ingestDocument. + // The key gets added and removed during pipeline execution. 
+ String actualPipelineId = (String) simulateVerboseIngestDocument.getIngestMetadata().remove("pipeline"); + assertThat(actualPipelineId, equalTo(expectedPipelineId)); + + assertThat(simulateVerboseIngestDocument, not(sameInstance(expectedIngestDocument))); + assertIngestDocument(simulateVerboseIngestDocument, expectedIngestDocument); + assertThat(simulateVerboseIngestDocument.getSourceAndMetadata(), not(sameInstance(expectedIngestDocument.getSourceAndMetadata()))); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 2d76256c91a85..20526366624fb 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -288,7 +288,7 @@ public void testUnknownFieldParsing() throws Exception { .endObject()); XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); - assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field], parser not found", ex.getMessage()); + assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); UpdateRequest request2 = new UpdateRequest("test", "1"); XContentParser unknownObject = createParser(XContentFactory.jsonBuilder() @@ -299,7 +299,7 @@ public void testUnknownFieldParsing() throws Exception { .endObject() .endObject()); ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject)); - assertEquals("[1:76] [UpdateRequest] unknown field [params], parser not found", ex.getMessage()); + assertEquals("[1:76] [UpdateRequest] unknown field [params]", ex.getMessage()); } public void testFetchSourceParsing() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java new file mode 100644 index 0000000000000..7f19e725c5d4f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.action.index; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction.AdjustableSemaphore; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +public class MappingUpdatedActionTests extends ESTestCase { + + public void testAdjustableSemaphore() { + AdjustableSemaphore sem = new AdjustableSemaphore(1, randomBoolean()); + assertEquals(1, sem.availablePermits()); + assertTrue(sem.tryAcquire()); + assertEquals(0, sem.availablePermits()); + assertFalse(sem.tryAcquire()); + assertEquals(0, sem.availablePermits()); + + // increase the number of max permits to 2 + sem.setMaxPermits(2); + assertEquals(1, sem.availablePermits()); + assertTrue(sem.tryAcquire()); + assertEquals(0, sem.availablePermits()); + + // release all current permits + sem.release(); + assertEquals(1, sem.availablePermits()); + sem.release(); + assertEquals(2, sem.availablePermits()); + + // reduce number of max permits to 1 + sem.setMaxPermits(1); + assertEquals(1, sem.availablePermits()); + // set back to 2 + sem.setMaxPermits(2); + assertEquals(2, sem.availablePermits()); + + // take both permits and reduce max permits + assertTrue(sem.tryAcquire()); + assertTrue(sem.tryAcquire()); + assertEquals(0, sem.availablePermits()); + assertFalse(sem.tryAcquire()); + sem.setMaxPermits(1); + assertEquals(-1, sem.availablePermits()); + assertFalse(sem.tryAcquire()); + + // release one permit + sem.release(); + assertEquals(0, sem.availablePermits()); + assertFalse(sem.tryAcquire()); + + // release second permit + sem.release(); + assertEquals(1, sem.availablePermits()); + assertTrue(sem.tryAcquire()); + } + + public void testMappingUpdatedActionBlocks() throws Exception { + List> inFlightListeners = new CopyOnWriteArrayList<>(); + final MappingUpdatedAction mua = new MappingUpdatedAction(Settings.builder() + .put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), 1).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { + + @Override + protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListener listener) { + inFlightListeners.add(listener); + } + }; + + PlainActionFuture fut1 = new PlainActionFuture<>(); + mua.updateMappingOnMaster(null, null, fut1); + assertEquals(1, inFlightListeners.size()); + assertEquals(0, mua.blockedThreads()); + + PlainActionFuture fut2 = new PlainActionFuture<>(); + Thread thread = new Thread(() -> { + mua.updateMappingOnMaster(null, null, fut2); // blocked + }); + thread.start(); + assertBusy(() -> assertEquals(1, mua.blockedThreads())); + + assertEquals(1, inFlightListeners.size()); + assertFalse(fut1.isDone()); + inFlightListeners.remove(0).onResponse(null); + assertTrue(fut1.isDone()); + + thread.join(); + assertEquals(0, mua.blockedThreads()); + assertEquals(1, inFlightListeners.size()); + assertFalse(fut2.isDone()); + inFlightListeners.remove(0).onResponse(null); + assertTrue(fut2.isDone()); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 
e4560d0613ccd..3004dc99ac360 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -991,17 +991,18 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti cluster1.runRandomly(); cluster1.stabilise(); - final ClusterNode newNode; + final ClusterNode nodeInOtherCluster; try (Cluster cluster2 = new Cluster(3)) { cluster2.runRandomly(); cluster2.stabilise(); - final ClusterNode nodeInOtherCluster = randomFrom(cluster2.clusterNodes); - newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(), - nodeInOtherCluster.getLocalNode(), n -> cluster1.new MockPersistedState(n, nodeInOtherCluster.persistedState, - Function.identity(), Function.identity()), nodeInOtherCluster.nodeSettings); + nodeInOtherCluster = randomFrom(cluster2.clusterNodes); } + final ClusterNode newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(), + nodeInOtherCluster.getLocalNode(), n -> cluster1.new MockPersistedState(n, nodeInOtherCluster.persistedState, + Function.identity(), Function.identity()), nodeInOtherCluster.nodeSettings); + cluster1.clusterNodes.add(newNode); MockLogAppender mockAppender = new MockLogAppender(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java new file mode 100644 index 0000000000000..867f05131f654 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.hamcrest.Matchers.containsString; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class RemoveCustomsCommandIT extends ESIntegTestCase { + + public void testRemoveCustomsAbortedByUser() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + expectThrows(() -> removeCustoms(environment, true, new String[]{ "index-graveyard" }), + ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void testRemoveCustomsSuccessful() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + createIndex("test"); + client().admin().indices().prepareDelete("test").get(); + assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().indexGraveyard().getTombstones().size()); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + MockTerminal terminal = removeCustoms(environment, false, + randomBoolean() ? 
+ new String[]{ "index-graveyard" } : + new String[]{ "index-*" } + ); + assertThat(terminal.getOutput(), containsString(RemoveCustomsCommand.CUSTOMS_REMOVED_MSG)); + assertThat(terminal.getOutput(), containsString("The following customs will be removed:")); + assertThat(terminal.getOutput(), containsString("index-graveyard")); + + internalCluster().startNode(dataPathSettings); + assertEquals(0, client().admin().cluster().prepareState().get().getState().metaData().indexGraveyard().getTombstones().size()); + } + + public void testCustomDoesNotMatch() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + createIndex("test"); + client().admin().indices().prepareDelete("test").get(); + assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().indexGraveyard().getTombstones().size()); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + UserException ex = expectThrows(UserException.class, () -> removeCustoms(environment, false, + new String[]{ "index-greveyard-with-typos" })); + assertThat(ex.getMessage(), containsString("No custom metadata matching [index-greveyard-with-typos] were " + + "found on this node")); + } + + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort, String... args) + throws Exception { + final MockTerminal terminal = new MockTerminal(); + final OptionSet options = command.getParser().parse(args); + final String input; + + if (abort) { + input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); + } else { + input = randomBoolean() ? "y" : "Y"; + } + + terminal.addTextInput(input); + + try { + command.execute(terminal, options, environment); + } finally { + assertThat(terminal.getOutput(), containsString(ElasticsearchNodeCommand.STOP_WARNING_MSG)); + } + + return terminal; + } + + private MockTerminal removeCustoms(Environment environment, boolean abort, String... args) throws Exception { + final MockTerminal terminal = executeCommand(new RemoveCustomsCommand(), environment, abort, args); + assertThat(terminal.getOutput(), containsString(RemoveCustomsCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(RemoveCustomsCommand.CUSTOMS_REMOVED_MSG)); + return terminal; + } + + private void expectThrows(ThrowingRunnable runnable, String message) { + ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); + assertThat(ex.getMessage(), containsString(message)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java new file mode 100644 index 0000000000000..b3f37a9b17a7a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class RemoveSettingsCommandIT extends ESIntegTestCase { + + public void testRemoveSettingsAbortedByUser() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false).build()).get(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + expectThrows(() -> removeSettings(environment, true, + new String[]{ DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey() }), + ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void testRemoveSettingsSuccessful() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false).build()).get(); + assertThat(client().admin().cluster().prepareState().get().getState().metaData().persistentSettings().keySet(), + contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey())); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + MockTerminal terminal = removeSettings(environment, false, + randomBoolean() ? 
+ new String[]{ DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey() } : + new String[]{ "cluster.routing.allocation.disk.*" } + ); + assertThat(terminal.getOutput(), containsString(RemoveSettingsCommand.SETTINGS_REMOVED_MSG)); + assertThat(terminal.getOutput(), containsString("The following settings will be removed:")); + assertThat(terminal.getOutput(), containsString( + DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey() + ": " + false)); + + internalCluster().startNode(dataPathSettings); + assertThat(client().admin().cluster().prepareState().get().getState().metaData().persistentSettings().keySet(), + not(contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()))); + } + + public void testSettingDoesNotMatch() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + String node = internalCluster().startNode(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false).build()).get(); + assertThat(client().admin().cluster().prepareState().get().getState().metaData().persistentSettings().keySet(), + contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey())); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); + UserException ex = expectThrows(UserException.class, () -> removeSettings(environment, false, + new String[]{ "cluster.routing.allocation.disk.bla.*" })); + assertThat(ex.getMessage(), containsString("No persistent cluster settings matching [cluster.routing.allocation.disk.bla.*] were " + + "found on this node")); + } + + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort, String... args) + throws Exception { + final MockTerminal terminal = new MockTerminal(); + final OptionSet options = command.getParser().parse(args); + final String input; + + if (abort) { + input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); + } else { + input = randomBoolean() ? "y" : "Y"; + } + + terminal.addTextInput(input); + + try { + command.execute(terminal, options, environment); + } finally { + assertThat(terminal.getOutput(), containsString(ElasticsearchNodeCommand.STOP_WARNING_MSG)); + } + + return terminal; + } + + private MockTerminal removeSettings(Environment environment, boolean abort, String... 
args) throws Exception { + final MockTerminal terminal = executeCommand(new RemoveSettingsCommand(), environment, abort, args); + assertThat(terminal.getOutput(), containsString(RemoveSettingsCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(RemoveSettingsCommand.SETTINGS_REMOVED_MSG)); + return terminal; + } + + private void expectThrows(ThrowingRunnable runnable, String message) { + ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); + assertThat(ex.getMessage(), containsString(message)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 55fc91b943640..0ca28bac60e84 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -22,23 +22,20 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetaData; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.gateway.GatewayMetaState; +import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import java.io.IOException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -49,7 +46,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { @@ -138,44 +134,20 @@ public void testDetachNodeLocked() throws IOException { } } - public void testBootstrapNoNodeMetaData() throws IOException { + public void testBootstrapNoNodeMetaData() { Settings envSettings = buildEnvSettings(Settings.EMPTY); Environment environment = TestEnvironment.newEnvironment(envSettings); - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, environment)) { - NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - } - - expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG); } public void testBootstrapNotBootstrappedCluster() throws Exception { String node = internalCluster().startNode( - Settings.builder() - .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node 
startup - .build()); - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); - }); - - Settings dataPathSettings = internalCluster().dataPathSettings(node); - - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); - } - - public void testDetachNotBootstrappedCluster() throws Exception { - String node = internalCluster().startNode( - Settings.builder() - .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup - .build()); + Settings.builder() + .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); + .execute().actionGet().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -185,65 +157,35 @@ public void testDetachNotBootstrappedCluster() throws Exception { Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); - } - - public void testBootstrapNoManifestFile() throws IOException { - internalCluster().setBootstrapMasterNodeIndex(0); - String node = internalCluster().startNode(); - Settings dataPathSettings = internalCluster().dataPathSettings(node); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); - } - - public void testDetachNoManifestFile() throws IOException { - internalCluster().setBootstrapMasterNodeIndex(0); - String node = internalCluster().startNode(); - Settings dataPathSettings = internalCluster().dataPathSettings(node); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG); } - public void testBootstrapNoMetaData() throws IOException { + public void testBootstrapNoClusterState() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); String node = internalCluster().startNode(); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); 
NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + PersistedClusterStateService.deleteAll(nodeEnvironment.nodeDataPaths()); - expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG); } - public void testDetachNoMetaData() throws IOException { + public void testDetachNoClusterState() throws IOException { internalCluster().setBootstrapMasterNodeIndex(0); String node = internalCluster().startNode(); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + PersistedClusterStateService.deleteAll(nodeEnvironment.nodeDataPaths()); - expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_NODE_METADATA_FOUND_MSG); } public void testBootstrapAbortedByUser() throws IOException { @@ -318,14 +260,16 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> stop 1st master-eligible node and data-only node"); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); + assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten()); internalCluster().stopRandomDataNode(); logger.info("--> unsafely-bootstrap 1st master-eligible node"); MockTerminal terminal = unsafeBootstrap(environmentMaster1); - MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); + MetaData metaData = ElasticsearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths()) + .loadBestOnDiskState().metaData; assertThat(terminal.getOutput(), containsString( - String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, - metaData.coordinationMetaData().term(), metaData.version()))); + String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, + metaData.coordinationMetaData().term(), metaData.version()))); logger.info("--> start 1st master-eligible node"); internalCluster().startMasterOnlyNode(master1DataPathSettings); @@ -376,6 +320,8 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti logger.info("--> index 1 doc and ensure index is green"); client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); ensureGreen("test"); + assertBusy(() -> internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten()))); logger.info("--> verify 1 doc in the index"); 
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); @@ -383,6 +329,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti logger.info("--> stop data-only node and detach it from the old cluster"); Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); + assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten()); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build()); @@ -459,65 +406,4 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData( assertThat(state.metaData().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); } - - private static class SimulatedDeleteFailureException extends RuntimeException { - } - - public void testCleanupOldMetaDataFails() throws Exception { - // establish some metadata. - internalCluster().setBootstrapMasterNodeIndex(0); - String node = internalCluster().startNode(); - Settings dataPathSettings = internalCluster().dataPathSettings(node); - final Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()); - internalCluster().stopRandomDataNode(); - - // find data paths - Path[] dataPaths; - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(environment.settings(), environment)) { - dataPaths = nodeEnvironment.nodeDataPaths(); - } - - NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); - - final Manifest originalManifest = loadLatestManifest(dataPaths, namedXContentRegistry); - final MetaData originalMetaData = loadMetaData(dataPaths, namedXContentRegistry, originalManifest); - - executeCommand(new UnsafeBootstrapMasterCommand() { - @Override - protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newGeneration) { - throw new SimulatedDeleteFailureException(); - } - }, environment, false); - - - // check original meta-data left untouched. - assertEquals(loadMetaData(dataPaths, namedXContentRegistry, originalManifest).clusterUUID(), originalMetaData.clusterUUID()); - - // check that we got new clusterUUID despite deletion failing - final Manifest secondManifest = loadLatestManifest(dataPaths, namedXContentRegistry); - final MetaData secondMetaData = loadMetaData(dataPaths, namedXContentRegistry, secondManifest); - assertThat(secondManifest.getGlobalGeneration(), greaterThan(originalManifest.getGlobalGeneration())); - assertNotEquals(originalMetaData.clusterUUID(), secondMetaData.clusterUUID()); - - // check that a new run will cleanup. 
- executeCommand(new UnsafeBootstrapMasterCommand(), environment, false); - - assertNull(loadMetaData(dataPaths, namedXContentRegistry, originalManifest)); - assertNull(loadMetaData(dataPaths, namedXContentRegistry, secondManifest)); - - final Manifest finalManifest = loadLatestManifest(dataPaths, namedXContentRegistry); - final MetaData finalMetaData = loadMetaData(dataPaths, namedXContentRegistry, finalManifest); - - assertNotNull(finalMetaData); - assertNotEquals(secondMetaData.clusterUUID(), finalMetaData.clusterUUID()); - } - - private Manifest loadLatestManifest(Path[] dataPaths, NamedXContentRegistry namedXContentRegistry) throws IOException { - return Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); - } - - private MetaData loadMetaData(Path[] dataPaths, NamedXContentRegistry namedXContentRegistry, Manifest manifest) { - return MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), dataPaths); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index 32e7a3bcbd873..20deb39c72322 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -82,6 +82,18 @@ public void testUpgrade() { assertSame(src, service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion())); // no double upgrade } + public void testUpgradeCustomSimilarity() { + MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); + IndexMetaData src = newIndexMeta("foo", + Settings.builder() + .put("index.similarity.my_similarity.type", "DFR") + .put("index.similarity.my_similarity.after_effect", "l") + .build()); + assertFalse(service.isUpgraded(src)); + src = service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion()); + assertTrue(service.isUpgraded(src)); + } + public void testIsUpgraded() { MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); diff --git a/server/src/test/java/org/elasticsearch/common/RoundingTests.java b/server/src/test/java/org/elasticsearch/common/RoundingTests.java index 7de894d081f21..8e19bdaf5547a 100644 --- a/server/src/test/java/org/elasticsearch/common/RoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/RoundingTests.java @@ -201,10 +201,18 @@ public void testOffsetRounding() { Rounding rounding = Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).offset(twoHours).build(); assertThat(rounding.round(0), equalTo(-oneDay + twoHours)); assertThat(rounding.round(twoHours), equalTo(twoHours)); + assertThat(rounding.nextRoundingValue(-oneDay), equalTo(-oneDay + twoHours)); + assertThat(rounding.nextRoundingValue(0), equalTo(twoHours)); + assertThat(rounding.withoutOffset().round(0), equalTo(0L)); + assertThat(rounding.withoutOffset().nextRoundingValue(0), equalTo(oneDay)); rounding = Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).offset(-twoHours).build(); assertThat(rounding.round(0), equalTo(-twoHours)); assertThat(rounding.round(oneDay - twoHours), equalTo(oneDay - twoHours)); + assertThat(rounding.nextRoundingValue(-oneDay), equalTo(-twoHours)); + assertThat(rounding.nextRoundingValue(0), equalTo(oneDay - twoHours)); + 
assertThat(rounding.withoutOffset().round(0), equalTo(0L)); + assertThat(rounding.withoutOffset().nextRoundingValue(0), equalTo(oneDay)); } /** diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java index ef705fa27c42c..309302214f462 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java @@ -19,11 +19,13 @@ package org.elasticsearch.common.geo; +import org.apache.lucene.geo.GeoEncodingUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -133,6 +135,35 @@ public void testNullTopBottomLeftRight() throws Exception { } } + public void testPointInBounds() { + for (int iter = 0; iter < 1000; iter++) { + GeoBoundingBox geoBoundingBox = randomBBox(); + GeoBoundingBox bbox = new GeoBoundingBox( + new GeoPoint(quantizeLat(geoBoundingBox.top()), quantizeLon(geoBoundingBox.left())), + new GeoPoint(quantizeLat(geoBoundingBox.bottom()), quantizeLon(geoBoundingBox.right()))); + if (bbox.left() > bbox.right()) { + double lonWithin = randomBoolean() ? + randomDoubleBetween(bbox.left(), 180.0, true) + : randomDoubleBetween(-180.0, bbox.right(), true); + double latWithin = randomDoubleBetween(bbox.bottom(), bbox.top(), true); + double lonOutside = randomDoubleBetween(bbox.left(), bbox.right(), true); + double latOutside = randomBoolean() ? 
randomDoubleBetween(Math.max(bbox.top(), bbox.bottom()), 90, false) + : randomDoubleBetween(-90, Math.min(bbox.bottom(), bbox.top()), false); + + assertTrue(bbox.pointInBounds(lonWithin, latWithin)); + assertFalse(bbox.pointInBounds(lonOutside, latOutside)); + } else { + double lonWithin = randomDoubleBetween(bbox.left(), bbox.right(), true); + double latWithin = randomDoubleBetween(bbox.bottom(), bbox.top(), true); + double lonOutside = GeoUtils.normalizeLon(randomDoubleBetween(bbox.right(), 180, false)); + double latOutside = GeoUtils.normalizeLat(randomDoubleBetween(bbox.top(), 90, false)); + + assertTrue(bbox.pointInBounds(lonWithin, latWithin)); + assertFalse(bbox.pointInBounds(lonOutside, latOutside)); + } + } + } + private void assertBBox(GeoBoundingBox expected, XContentBuilder builder) throws IOException { try (XContentParser parser = createParser(builder)) { parser.nextToken(); @@ -140,10 +171,17 @@ private void assertBBox(GeoBoundingBox expected, XContentBuilder builder) throws } } - private GeoBoundingBox randomBBox() { - double topLat = GeometryTestUtils.randomLat(); - double bottomLat = randomDoubleBetween(GeoUtils.MIN_LAT, topLat, true); - return new GeoBoundingBox(new GeoPoint(topLat, GeometryTestUtils.randomLon()), - new GeoPoint(bottomLat, GeometryTestUtils.randomLon())); + public static GeoBoundingBox randomBBox() { + Rectangle rectangle = GeometryTestUtils.randomRectangle(); + return new GeoBoundingBox(new GeoPoint(rectangle.getMaxLat(), rectangle.getMinLon()), + new GeoPoint(rectangle.getMinLat(), rectangle.getMaxLon())); + } + + private static double quantizeLat(double lat) { + return GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(lat)); + } + + private static double quantizeLon(double lon) { + return GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lon)); } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknownTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknownTests.java new file mode 100644 index 0000000000000..8e2e0b05612a1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/xcontent/SuggestingErrorOnUnknownTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; + +public class SuggestingErrorOnUnknownTests extends ESTestCase { + private String errorMessage(String unknownField, String... 
candidates) { + return new SuggestingErrorOnUnknown().errorMessage("test", unknownField, Arrays.asList(candidates)); + } + + public void testNoCandidates() { + assertThat(errorMessage("foo"), equalTo("[test] unknown field [foo]")); + } + public void testBadCandidates() { + assertThat(errorMessage("foo", "bar", "baz"), equalTo("[test] unknown field [foo]")); + } + public void testOneCandidate() { + assertThat(errorMessage("foo", "bar", "fop"), equalTo("[test] unknown field [foo] did you mean [fop]?")); + } + public void testManyCandidate() { + assertThat(errorMessage("foo", "bar", "fop", "fou", "baz"), + equalTo("[test] unknown field [foo] did you mean any of [fop, fou]?")); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 85d2f7e654f1f..40ef3a48c048f 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -47,11 +49,13 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class NodeEnvironmentIT extends ESIntegTestCase { - public void testStartFailureOnDataForNonDataNode() { + public void testStartFailureOnDataForNonDataNode() throws Exception { final String indexName = "test-fail-on-data"; logger.info("--> starting one node"); - String node = internalCluster().startNode(); + final boolean writeDanglingIndices = randomBoolean(); + String node = internalCluster().startNode(Settings.builder() + .put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), writeDanglingIndices).build()); Settings dataPathSettings = internalCluster().dataPathSettings(node); logger.info("--> creating index"); @@ -60,6 +64,10 @@ public void testStartFailureOnDataForNonDataNode() { .put("index.number_of_replicas", 0) ).get(); final String indexUUID = resolveIndex(indexName).getUUID(); + if (writeDanglingIndices) { + assertBusy(() -> internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten()))); + } logger.info("--> restarting the node with node.data=false and node.master=false"); IllegalStateException ex = expectThrows(IllegalStateException.class, @@ -74,13 +82,19 @@ public Settings onNodeStopped(String nodeName) { .build(); } })); - assertThat(ex.getMessage(), containsString(indexUUID)); - assertThat(ex.getMessage(), - startsWith("Node is started with " - + Node.NODE_DATA_SETTING.getKey() - + "=false and " - + Node.NODE_MASTER_SETTING.getKey() - + "=false, but has index metadata")); + if (writeDanglingIndices) { + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false and " + + Node.NODE_MASTER_SETTING.getKey() + + "=false, but has index metadata")); + } else { + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + } logger.info("--> start the node again with node.data=true and node.master=true"); 
internalCluster().startNode(dataPathSettings); @@ -124,14 +138,14 @@ public Settings onNodeStopped(String nodeName) { public void testFailsToStartIfDowngraded() { final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooNewVersion()), dataPaths)); + PersistedClusterStateService.overrideVersion(NodeMetaDataTests.tooNewVersion(), dataPaths)); assertThat(illegalStateException.getMessage(), allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); } public void testFailsToStartIfUpgradedTooFar() { final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooOldVersion()), dataPaths)); + PersistedClusterStateService.overrideVersion(NodeMetaDataTests.tooOldVersion(), dataPaths)); assertThat(illegalStateException.getMessage(), allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); } @@ -191,8 +205,10 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException { assertThat(ise.getMessage(), containsString("unexpected folder encountered during data folder upgrade")); Files.delete(badFolder); - final Path conflictingFolder = randomFrom(dataPaths).resolve("indices"); - if (Files.exists(conflictingFolder) == false) { + final Path randomDataPath = randomFrom(dataPaths); + final Path conflictingFolder = randomDataPath.resolve("indices"); + final Path sourceFolder = randomDataPath.resolve("nodes").resolve("0").resolve("indices"); + if (Files.exists(sourceFolder) && Files.exists(conflictingFolder) == false) { Files.createDirectories(conflictingFolder); ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); assertThat(ise.getMessage(), containsString("target folder already exists during data folder upgrade")); @@ -223,10 +239,15 @@ public void testFailsToStartOnDataPathsFromMultipleNodes() throws IOException { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1))); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); - final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> PersistedClusterStateService.nodeMetaData(allDataPaths.stream().map(PathUtils::get).toArray(Path[]::new))); + + assertThat(illegalStateException.getMessage(), containsString("unexpected node ID in metadata")); + + illegalStateException = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), allDataPaths))); - assertThat(illegalStateException.getMessage(), containsString("belong to multiple nodes with IDs")); + assertThat(illegalStateException.getMessage(), containsString("unexpected node ID in metadata")); final List node0DataPathsPlusOne = new ArrayList<>(node0DataPaths); node0DataPathsPlusOne.add(createTempDir().toString()); diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 19100343d2b15..e273acfafc812 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ 
b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -384,7 +384,7 @@ public void testCustomDataPaths() throws Exception { env.close(); } - public void testPersistentNodeId() throws IOException { + public void testNodeIdNotPersistedAtInitialization() throws IOException { NodeEnvironment env = newNodeEnvironment(new String[0], Settings.builder() .put("node.local_storage", false) .put("node.master", false) @@ -398,7 +398,7 @@ public void testPersistentNodeId() throws IOException { nodeID = env.nodeId(); env.close(); env = newNodeEnvironment(paths, Settings.EMPTY); - assertThat(env.nodeId(), equalTo(nodeID)); + assertThat(env.nodeId(), not(equalTo(nodeID))); env.close(); env = newNodeEnvironment(Settings.EMPTY); assertThat(env.nodeId(), not(equalTo(nodeID))); diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index 7520a214f9e20..300191d3f2f3f 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -21,14 +21,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.not; -import static org.mockito.Matchers.contains; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class NodeRepurposeCommandIT extends ESIntegTestCase { @@ -38,14 +37,14 @@ public void testRepurpose() throws Exception { logger.info("--> starting two nodes"); final String masterNode = internalCluster().startMasterOnlyNode(); - final String dataNode = internalCluster().startDataOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode( + Settings.builder().put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false).build()); logger.info("--> creating index"); prepareCreate(indexName, Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) ).get(); - final String indexUUID = resolveIndex(indexName).getUUID(); logger.info("--> indexing a simple document"); client().prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); @@ -82,10 +81,10 @@ public void testRepurpose() throws Exception { ); logger.info("--> Repurposing node 1"); - executeRepurposeCommand(noMasterNoDataSettingsForDataNode, indexUUID, 1); + executeRepurposeCommand(noMasterNoDataSettingsForDataNode, 1, 1); ElasticsearchException lockedException = expectThrows(ElasticsearchException.class, - () -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 1) + () -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 1) ); assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)); @@ -101,7 +100,7 @@ public void testRepurpose() throws Exception { internalCluster().stopRandomNode(s -> true); internalCluster().stopRandomNode(s -> true); - executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 0); + executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, 1, 0); // by restarting as master and data node, we can check that the index 
definition was really deleted and also that the tool // does not mess things up so much that the nodes cannot boot as master or data node any longer. @@ -114,14 +113,13 @@ public void testRepurpose() throws Exception { assertFalse(indexExists(indexName)); } - private void executeRepurposeCommand(Settings settings, String indexUUID, int expectedShardCount) throws Exception { + private void executeRepurposeCommand(Settings settings, int expectedIndexCount, + int expectedShardCount) throws Exception { boolean verbose = randomBoolean(); Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build(); - int expectedIndexCount = TestEnvironment.newEnvironment(settingsWithPath).dataFiles().length; Matcher matcher = allOf( - containsString(NodeRepurposeCommand.noMasterMessage(1, expectedShardCount, expectedIndexCount)), - not(contains(NodeRepurposeCommand.PRE_V7_MESSAGE)), - NodeRepurposeCommandTests.conditionalNot(containsString(indexUUID), verbose == false)); + containsString(NodeRepurposeCommand.noMasterMessage(expectedIndexCount, expectedShardCount, 0)), + NodeRepurposeCommandTests.conditionalNot(containsString("test-repurpose"), verbose == false)); NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, verbose); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java index 8f713e57bf4da..783d4131f83db 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java @@ -23,13 +23,17 @@ import org.elasticsearch.Version; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.Index; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; @@ -40,16 +44,13 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; -import java.util.Collections; import java.util.stream.Stream; import static org.elasticsearch.env.NodeRepurposeCommand.NO_CLEANUP; import static org.elasticsearch.env.NodeRepurposeCommand.NO_DATA_TO_CLEAN_UP_FOUND; import static org.elasticsearch.env.NodeRepurposeCommand.NO_SHARD_DATA_TO_CLEAN_UP_FOUND; -import static org.elasticsearch.env.NodeRepurposeCommand.PRE_V7_MESSAGE; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; public class NodeRepurposeCommandTests extends ESTestCase { @@ -68,6 +69,12 @@ public void createNodePaths() throws IOException { environment = TestEnvironment.newEnvironment(dataMasterSettings); try (NodeEnvironment 
nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) { nodePaths = nodeEnvironment.nodeDataPaths(); + final String nodeId = randomAlphaOfLength(10); + try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId, + xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { + writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); + } } dataNoMasterSettings = Settings.builder() .put(dataMasterSettings) @@ -86,27 +93,32 @@ public void createNodePaths() throws IOException { } public void testEarlyExitNoCleanup() throws Exception { - createIndexDataFiles(dataMasterSettings, randomInt(10)); + createIndexDataFiles(dataMasterSettings, randomInt(10), randomBoolean()); verifyNoQuestions(dataMasterSettings, containsString(NO_CLEANUP)); verifyNoQuestions(dataNoMasterSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { - verifyNoQuestions(noDataNoMasterSettings, allOf(containsString(NO_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); - verifyNoQuestions(noDataMasterSettings, - allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); - - createManifest(null); + verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + + Environment environment = TestEnvironment.newEnvironment(noDataMasterSettings); + if (randomBoolean()) { + try (NodeEnvironment env = new NodeEnvironment(noDataMasterSettings, environment)) { + try (PersistedClusterStateService.Writer writer = + ElasticsearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, env.nodeDataPaths()).createWriter()) { + writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); + } + } + } - verifyNoQuestions(noDataNoMasterSettings, allOf(containsString(NO_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); - verifyNoQuestions(noDataMasterSettings, - allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - createIndexDataFiles(dataMasterSettings, 0); + createIndexDataFiles(dataMasterSettings, 0, randomBoolean()); - verifyNoQuestions(noDataMasterSettings, - allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); } @@ -119,33 +131,20 @@ public void testLocked() throws IOException { } public void testCleanupAll() throws Exception { - Manifest oldManifest = createManifest(INDEX); - checkCleanupAll(not(containsString(PRE_V7_MESSAGE))); - - Manifest newManifest = loadManifest(); - assertThat(newManifest.getIndexGenerations().entrySet(), hasSize(0)); - assertManifestIdenticalExceptIndices(oldManifest, newManifest); - } - - public void testCleanupAllPreV7() throws Exception { - checkCleanupAll(containsString(PRE_V7_MESSAGE)); - } - - private void checkCleanupAll(Matcher additionalOutputMatcher) throws Exception { - int shardCount = randomInt(10); + int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); - createIndexDataFiles(dataMasterSettings, shardCount); + boolean hasClusterState = 
randomBoolean(); + createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); String messageText = NodeRepurposeCommand.noMasterMessage( 1, environment.dataFiles().length*shardCount, - environment.dataFiles().length); + 0); Matcher outputMatcher = allOf( containsString(messageText), - additionalOutputMatcher, - conditionalNot(containsString("testUUID"), verbose == false), - conditionalNot(containsString("testIndex"), verbose == false) + conditionalNot(containsString("testIndex"), verbose == false || hasClusterState == false), + conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); verifyUnchangedOnAbort(noDataNoMasterSettings, outputMatcher, verbose); @@ -162,18 +161,17 @@ private void checkCleanupAll(Matcher additionalOutputMatcher) throws Exc public void testCleanupShardData() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); - Manifest manifest = randomBoolean() ? createManifest(INDEX) : null; - - createIndexDataFiles(dataMasterSettings, shardCount); + boolean hasClusterState = randomBoolean(); + createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); Matcher matcher = allOf( containsString(NodeRepurposeCommand.shardMessage(environment.dataFiles().length * shardCount, 1)), conditionalNot(containsString("testUUID"), verbose == false), - conditionalNot(containsString("testIndex"), verbose == false) + conditionalNot(containsString("testIndex"), verbose == false || hasClusterState == false), + conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noDataMasterSettings, - matcher, verbose); + verifyUnchangedOnAbort(noDataMasterSettings, matcher, verbose); // verify test setup expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataMasterSettings, environment).close()); @@ -182,12 +180,6 @@ public void testCleanupShardData() throws Exception { //verify clean. new NodeEnvironment(noDataMasterSettings, environment).close(); - - if (manifest != null) { - Manifest newManifest = loadManifest(); - assertThat(newManifest.getIndexGenerations().entrySet(), hasSize(1)); - assertManifestIdenticalExceptIndices(manifest, newManifest); - } } static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { @@ -237,31 +229,22 @@ private static void executeRepurposeCommand(MockTerminal terminal, Settings sett nodeRepurposeCommand.testExecute(terminal, options, env); } - private Manifest createManifest(Index index) throws org.elasticsearch.gateway.WriteStateException { - Manifest manifest = new Manifest(randomIntBetween(1,100), randomIntBetween(1,100), randomIntBetween(1,100), - index != null ? 
Collections.singletonMap(index, randomLongBetween(1,100)) : Collections.emptyMap()); - Manifest.FORMAT.writeAndCleanup(manifest, nodePaths); - return manifest; - } - - private Manifest loadManifest() throws IOException { - return Manifest.FORMAT.loadLatestState(logger, new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), nodePaths); - } - - private void assertManifestIdenticalExceptIndices(Manifest oldManifest, Manifest newManifest) { - assertEquals(oldManifest.getGlobalGeneration(), newManifest.getGlobalGeneration()); - assertEquals(oldManifest.getClusterStateVersion(), newManifest.getClusterStateVersion()); - assertEquals(oldManifest.getCurrentTerm(), newManifest.getCurrentTerm()); - } - - private void createIndexDataFiles(Settings settings, int shardCount) throws IOException { + private void createIndexDataFiles(Settings settings, int shardCount, boolean writeClusterState) throws IOException { int shardDataDirNumber = randomInt(10); - try (NodeEnvironment env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) { - IndexMetaData.FORMAT.write(IndexMetaData.builder(INDEX.getName()) - .settings(Settings.builder().put("index.version.created", Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(1) - .build(), env.indexPaths(INDEX)); + Environment environment = TestEnvironment.newEnvironment(settings); + try (NodeEnvironment env = new NodeEnvironment(settings, environment)) { + if (writeClusterState) { + try (PersistedClusterStateService.Writer writer = + ElasticsearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, env.nodeDataPaths()).createWriter()) { + writer.writeFullStateAndCommit(1L, ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(IndexMetaData.builder(INDEX.getName()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, INDEX.getUUID())) + .numberOfShards(1) + .numberOfReplicas(1)).build()) + .build()); + } + } for (Path path : env.indexPaths(INDEX)) { for (int i = 0; i < shardCount; ++i) { Files.createDirectories(path.resolve(Integer.toString(shardDataDirNumber))); diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index e7eb4c53dfec9..742c052a07b96 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -18,23 +18,25 @@ */ package org.elasticsearch.env; +import joptsimple.OptionParser; +import joptsimple.OptionSet; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.gateway.MetaDataStateFormat; -import org.elasticsearch.gateway.WriteStateException; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.test.ESTestCase; +import org.junit.After; import org.junit.Before; import java.io.IOException; import 
java.nio.file.Path; -import static org.elasticsearch.env.NodeMetaData.NODE_ID_KEY; -import static org.elasticsearch.env.NodeMetaData.NODE_VERSION_KEY; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -43,6 +45,8 @@ public class OverrideNodeVersionCommandTests extends ESTestCase { private Environment environment; private Path[] nodePaths; + private String nodeId; + private final OptionSet noOptions = new OptionParser().parse(); @Before public void createNodePaths() throws IOException { @@ -50,24 +54,41 @@ public void createNodePaths() throws IOException { environment = TestEnvironment.newEnvironment(settings); try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, environment)) { nodePaths = nodeEnvironment.nodeDataPaths(); + nodeId = nodeEnvironment.nodeId(); + + try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(nodePaths, nodeId, + xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { + writer.writeFullStateAndCommit(1L, ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder() + .persistentSettings(Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build()).build()) + .build()); + } } } + @After + public void checkClusterStateIntact() throws IOException { + assertTrue(MetaData.SETTING_READ_ONLY_SETTING.get(new PersistedClusterStateService(nodePaths, nodeId, + xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true) + .loadBestOnDiskState().metaData.persistentSettings())); + } + public void testFailsOnEmptyPath() { final Path emptyPath = createTempDir(); final MockTerminal mockTerminal = new MockTerminal(); final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[]{emptyPath}, environment)); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[]{emptyPath}, noOptions, environment)); assertThat(elasticsearchException.getMessage(), equalTo(OverrideNodeVersionCommand.NO_METADATA_MESSAGE)); expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); } - public void testFailsIfUnnecessary() throws WriteStateException { + public void testFailsIfUnnecessary() throws IOException { final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumIndexCompatibilityVersion().id, Version.CURRENT.id)); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), nodeVersion), nodePaths); + PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths); final MockTerminal mockTerminal = new MockTerminal(); final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)); assertThat(elasticsearchException.getMessage(), allOf( containsString("compatible with current version"), containsString(Version.CURRENT.toString()), @@ -76,13 +97,12 @@ public void testFailsIfUnnecessary() throws WriteStateException { } public void testWarnsIfTooOld() throws Exception { - final String nodeId = randomAlphaOfLength(10); final 
Version nodeVersion = NodeMetaDataTests.tooOldVersion(); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths); final MockTerminal mockTerminal = new MockTerminal(); mockTerminal.addTextInput("n\n"); final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)); assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); assertThat(mockTerminal.getOutput(), allOf( containsString("too old"), @@ -92,19 +112,17 @@ public void testWarnsIfTooOld() throws Exception { containsString(nodeVersion.toString()))); expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePaths); assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { - final String nodeId = randomAlphaOfLength(10); final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths); final MockTerminal mockTerminal = new MockTerminal(); mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)); assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); assertThat(mockTerminal.getOutput(), allOf( containsString("data loss"), @@ -113,18 +131,16 @@ public void testWarnsIfTooNew() throws Exception { containsString(nodeVersion.toString()))); expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePaths); assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { - final String nodeId = randomAlphaOfLength(10); final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths); final MockTerminal mockTerminal = new MockTerminal(); mockTerminal.addTextInput(randomFrom("y", "Y")); - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment); assertThat(mockTerminal.getOutput(), allOf( containsString("too old"), containsString("data loss"), @@ -134,18 +150,16 @@ public void testOverwritesIfTooOld() throws Exception { containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); 
expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePaths); assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); } public void testOverwritesIfTooNew() throws Exception { - final String nodeId = randomAlphaOfLength(10); final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); - NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths); final MockTerminal mockTerminal = new MockTerminal(); mockTerminal.addTextInput(randomFrom("y", "Y")); - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment); assertThat(mockTerminal.getOutput(), allOf( containsString("data loss"), containsString("You should not use this tool"), @@ -154,62 +168,7 @@ public void testOverwritesIfTooNew() throws Exception { containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + final NodeMetaData nodeMetaData = PersistedClusterStateService.nodeMetaData(nodePaths); assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); } - - public void testLenientlyIgnoresExtraFields() throws Exception { - final String nodeId = randomAlphaOfLength(10); - final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); - FutureNodeMetaData.FORMAT.writeAndCleanup(new FutureNodeMetaData(nodeId, nodeVersion, randomLong()), nodePaths); - try { - NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - fail("An exception should have been thrown"); - } catch (ElasticsearchException e) { - assertThat(ExceptionsHelper.stackTrace(e), containsString("unknown field [future_field]")); - } - - final MockTerminal mockTerminal = new MockTerminal(); - mockTerminal.addTextInput(randomFrom("y", "Y")); - new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); - assertThat(mockTerminal.getOutput(), allOf( - containsString("data loss"), - containsString("You should not use this tool"), - containsString(Version.CURRENT.toString()), - containsString(nodeVersion.toString()), - containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); - expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); - - final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); - assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); - assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); - } - - private static class FutureNodeMetaData { - private final String nodeId; - private final Version nodeVersion; - private final long futureValue; - - FutureNodeMetaData(String nodeId, Version nodeVersion, long futureValue) { - this.nodeId = nodeId; - this.nodeVersion = nodeVersion; - this.futureValue = futureValue; - } - - static final MetaDataStateFormat FORMAT - = new MetaDataStateFormat(NodeMetaData.FORMAT.getPrefix()) { - @Override - public void toXContent(XContentBuilder builder, 
FutureNodeMetaData state) throws IOException { - builder.field(NODE_ID_KEY, state.nodeId); - builder.field(NODE_VERSION_KEY, state.nodeVersion.id); - builder.field("future_field", state.futureValue); - } - - @Override - public FutureNodeMetaData fromXContent(XContentParser parser) { - throw new AssertionError("shouldn't be loading a FutureNodeMetaData"); - } - }; - } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 5cc73134bbcff..56d059f7278d1 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -30,10 +30,8 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.coordination.CoordinationMetaData; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -42,11 +40,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -57,7 +53,6 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.List; import java.util.Map; @@ -377,14 +372,13 @@ public void testRecoverBrokenIndexMetadata() throws Exception { ClusterState state = client().admin().cluster().prepareState().get().getState(); final IndexMetaData metaData = state.getMetaData().index("test"); - final IndexMetaData brokenMeta = IndexMetaData.builder(metaData).settings(Settings.builder().put(metaData.getSettings()) + final IndexMetaData.Builder brokenMeta = IndexMetaData.builder(metaData).settings(Settings.builder().put(metaData.getSettings()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.minimumIndexCompatibilityVersion().id) // this is invalid but should be archived .put("index.similarity.BM25.type", "boolean") // this one is not validated ahead of time and breaks allocation - .put("index.analysis.filter.myCollator.type", "icu_collation") - ).build(); - writeBrokenMeta(metaStateService -> metaStateService.writeIndexAndUpdateManifest("broken metadata", brokenMeta)); + .put("index.analysis.filter.myCollator.type", "icu_collation")); + restartNodesOnBrokenClusterState(ClusterState.builder(state).metaData(MetaData.builder(state.getMetaData()).put(brokenMeta))); // check that the cluster does not keep reallocating shards assertBusy(() -> { @@ -447,9 +441,9 @@ public void testRecoverMissingAnalyzer() throws Exception { ClusterState state = client().admin().cluster().prepareState().get().getState(); final IndexMetaData metaData = state.getMetaData().index("test"); - final 
IndexMetaData brokenMeta = IndexMetaData.builder(metaData).settings(metaData.getSettings() - .filter((s) -> "index.analysis.analyzer.test.tokenizer".equals(s) == false)).build(); - writeBrokenMeta(metaStateService -> metaStateService.writeIndexAndUpdateManifest("broken metadata", brokenMeta)); + final IndexMetaData.Builder brokenMeta = IndexMetaData.builder(metaData).settings(metaData.getSettings() + .filter((s) -> "index.analysis.analyzer.test.tokenizer".equals(s) == false)); + restartNodesOnBrokenClusterState(ClusterState.builder(state).metaData(MetaData.builder(state.getMetaData()).put(brokenMeta))); // check that the cluster does not keep reallocating shards assertBusy(() -> { @@ -494,7 +488,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { final MetaData brokenMeta = MetaData.builder(metaData).persistentSettings(Settings.builder() .put(metaData.persistentSettings()).put("this.is.unknown", true) .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), "broken").build()).build(); - writeBrokenMeta(metaStateService -> metaStateService.writeGlobalStateAndUpdateManifest("broken metadata", brokenMeta)); + restartNodesOnBrokenClusterState(ClusterState.builder(state).metaData(brokenMeta)); ensureYellow("test"); // wait for state recovery state = client().admin().cluster().prepareState().get().getState(); @@ -512,6 +506,8 @@ public void testArchiveBrokenClusterSettings() throws Exception { assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/48701") + // This test relates to loading a broken state that was written by a 6.x node, but for now we do not load state from old nodes. public void testHalfDeletedIndexImport() throws Exception { // It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that // deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the @@ -526,36 +522,40 @@ public void testHalfDeletedIndexImport() throws Exception { final MetaData metaData = internalCluster().getInstance(ClusterService.class).state().metaData(); final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); - writeBrokenMeta(metaStateService -> { - metaStateService.writeGlobalState("test", MetaData.builder(metaData) - // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the - // term in the coordination metadata - .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()).term(0L).build()) - // add a tombstone but do not delete the index metadata from disk - .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build()); - for (final Path path : paths) { - try (Stream stateFiles = Files.list(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) { - for (final Path manifestPath : stateFiles - .filter(p -> p.getFileName().toString().startsWith(Manifest.FORMAT.getPrefix())).collect(Collectors.toList())) { - IOUtils.rm(manifestPath); - } - } - } - }); +// writeBrokenMeta(metaStateService -> { +// metaStateService.writeGlobalState("test", MetaData.builder(metaData) +// // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the +// // term in the coordination metadata +// 
.coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()).term(0L).build()) +// // add a tombstone but do not delete the index metadata from disk +// .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build()); +// for (final Path path : paths) { +// try (Stream stateFiles = Files.list(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) { +// for (final Path manifestPath : stateFiles +// .filter(p -> p.getFileName().toString().startsWith(Manifest.FORMAT.getPrefix())).collect(Collectors.toList())) { +// IOUtils.rm(manifestPath); +// } +// } +// } +// }); ensureGreen(); assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); } - private void writeBrokenMeta(CheckedConsumer writer) throws Exception { - Map metaStateServices = Stream.of(internalCluster().getNodeNames()) - .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(MetaStateService.class, nodeName))); + private void restartNodesOnBrokenClusterState(ClusterState.Builder clusterStateBuilder) throws Exception { + Map lucenePersistedStateFactories = Stream.of(internalCluster().getNodeNames()) + .collect(Collectors.toMap(Function.identity(), + nodeName -> internalCluster().getInstance(PersistedClusterStateService.class, nodeName))); + final ClusterState clusterState = clusterStateBuilder.build(); internalCluster().fullRestart(new RestartCallback(){ @Override public Settings onNodeStopped(String nodeName) throws Exception { - final MetaStateService metaStateService = metaStateServices.get(nodeName); - writer.accept(metaStateService); + final PersistedClusterStateService lucenePersistedStateFactory = lucenePersistedStateFactories.get(nodeName); + try (PersistedClusterStateService.Writer writer = lucenePersistedStateFactory.createWriter()) { + writer.writeFullStateAndCommit(clusterState.term(), clusterState); + } return super.onNodeStopped(nodeName); } }); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java index e723d08d7352c..5015f74efeeaf 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -19,29 +19,50 @@ package org.elasticsearch.gateway; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetaData; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfigExclusion; import org.elasticsearch.cluster.coordination.CoordinationState; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; 
+import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; - +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOError; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class GatewayMetaStatePersistedStateTests extends ESTestCase { private NodeEnvironment nodeEnvironment; @@ -53,7 +74,7 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase { public void setUp() throws Exception { nodeEnvironment = newNodeEnvironment(); localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), - Sets.newHashSet(DiscoveryNodeRole.MASTER_ROLE), Version.CURRENT); + Sets.newHashSet(DiscoveryNodeRole.MASTER_ROLE), Version.CURRENT); clusterName = new ClusterName(randomAlphaOfLength(10)); settings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName.value()).build(); super.setUp(); @@ -69,57 +90,68 @@ private CoordinationState.PersistedState newGatewayPersistedState() { final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode); gateway.start(settings, nodeEnvironment, xContentRegistry()); final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); - assertThat(persistedState, not(instanceOf(InMemoryPersistedState.class))); + assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class)); return persistedState; } - private CoordinationState.PersistedState maybeNew(CoordinationState.PersistedState persistedState) { + private CoordinationState.PersistedState maybeNew(CoordinationState.PersistedState persistedState) throws IOException { if (randomBoolean()) { + persistedState.close(); return newGatewayPersistedState(); } return persistedState; } - public void testInitialState() { - CoordinationState.PersistedState gateway = newGatewayPersistedState(); - ClusterState state = gateway.getLastAcceptedState(); - assertThat(state.getClusterName(), equalTo(clusterName)); - assertTrue(MetaData.isGlobalStateEquals(state.metaData(), MetaData.EMPTY_META_DATA)); - assertThat(state.getVersion(), equalTo(Manifest.empty().getClusterStateVersion())); - assertThat(state.getNodes().getLocalNode(), equalTo(localNode)); - - long currentTerm = gateway.getCurrentTerm(); - assertThat(currentTerm, equalTo(Manifest.empty().getCurrentTerm())); + public void testInitialState() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); + ClusterState state = gateway.getLastAcceptedState(); + assertThat(state.getClusterName(), equalTo(clusterName)); + assertTrue(MetaData.isGlobalStateEquals(state.metaData(), MetaData.EMPTY_META_DATA)); + assertThat(state.getVersion(), equalTo(Manifest.empty().getClusterStateVersion())); + 
assertThat(state.getNodes().getLocalNode(), equalTo(localNode)); + + long currentTerm = gateway.getCurrentTerm(); + assertThat(currentTerm, equalTo(Manifest.empty().getCurrentTerm())); + } finally { + IOUtils.close(gateway); + } } - public void testSetCurrentTerm() { - CoordinationState.PersistedState gateway = newGatewayPersistedState(); - - for (int i = 0; i < randomIntBetween(1, 5); i++) { - final long currentTerm = randomNonNegativeLong(); - gateway.setCurrentTerm(currentTerm); - gateway = maybeNew(gateway); - assertThat(gateway.getCurrentTerm(), equalTo(currentTerm)); + public void testSetCurrentTerm() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); + + for (int i = 0; i < randomIntBetween(1, 5); i++) { + final long currentTerm = randomNonNegativeLong(); + gateway.setCurrentTerm(currentTerm); + gateway = maybeNew(gateway); + assertThat(gateway.getCurrentTerm(), equalTo(currentTerm)); + } + } finally { + IOUtils.close(gateway); } } private ClusterState createClusterState(long version, MetaData metaData) { return ClusterState.builder(clusterName). - nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()). - version(version). - metaData(metaData). - build(); + nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()). + version(version). + metaData(metaData). + build(); } private CoordinationMetaData createCoordinationMetaData(long term) { CoordinationMetaData.Builder builder = CoordinationMetaData.builder(); builder.term(term); builder.lastAcceptedConfiguration( - new CoordinationMetaData.VotingConfiguration( - Sets.newHashSet(generateRandomStringArray(10, 10, false)))); + new CoordinationMetaData.VotingConfiguration( + Sets.newHashSet(generateRandomStringArray(10, 10, false)))); builder.lastCommittedConfiguration( - new CoordinationMetaData.VotingConfiguration( - Sets.newHashSet(generateRandomStringArray(10, 10, false)))); + new CoordinationMetaData.VotingConfiguration( + Sets.newHashSet(generateRandomStringArray(10, 10, false)))); for (int i = 0; i < randomIntBetween(0, 5); i++) { builder.addVotingConfigExclusion(new VotingConfigExclusion(randomAlphaOfLength(10), randomAlphaOfLength(10))); } @@ -129,12 +161,12 @@ private CoordinationMetaData createCoordinationMetaData(long term) { private IndexMetaData createIndexMetaData(String indexName, int numberOfShards, long version) { return IndexMetaData.builder(indexName).settings( - Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, indexName) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build() + Settings.builder() + .put(IndexMetaData.SETTING_INDEX_UUID, indexName) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build() ).version(version).build(); } @@ -146,70 +178,174 @@ private void assertClusterStateEqual(ClusterState expected, ClusterState actual) } } - public void testSetLastAcceptedState() { - CoordinationState.PersistedState gateway = newGatewayPersistedState(); - final long term = randomNonNegativeLong(); - - for (int i = 0; i < randomIntBetween(1, 5); i++) { - final long version = randomNonNegativeLong(); - final String indexName = randomAlphaOfLength(10); - final IndexMetaData indexMetaData = createIndexMetaData(indexName, 
randomIntBetween(1,5), randomNonNegativeLong()); - final MetaData metaData = MetaData.builder(). + public void testSetLastAcceptedState() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); + final long term = randomNonNegativeLong(); + + for (int i = 0; i < randomIntBetween(1, 5); i++) { + final long version = randomNonNegativeLong(); + final String indexName = randomAlphaOfLength(10); + final IndexMetaData indexMetaData = createIndexMetaData(indexName, randomIntBetween(1, 5), randomNonNegativeLong()); + final MetaData metaData = MetaData.builder(). persistentSettings(Settings.builder().put(randomAlphaOfLength(10), randomAlphaOfLength(10)).build()). coordinationMetaData(createCoordinationMetaData(term)). put(indexMetaData, false). build(); - ClusterState state = createClusterState(version, metaData); + ClusterState state = createClusterState(version, metaData); - gateway.setLastAcceptedState(state); - gateway = maybeNew(gateway); + gateway.setLastAcceptedState(state); + gateway = maybeNew(gateway); - ClusterState lastAcceptedState = gateway.getLastAcceptedState(); - assertClusterStateEqual(state, lastAcceptedState); + ClusterState lastAcceptedState = gateway.getLastAcceptedState(); + assertClusterStateEqual(state, lastAcceptedState); + } + } finally { + IOUtils.close(gateway); } } - public void testSetLastAcceptedStateTermChanged() { - CoordinationState.PersistedState gateway = newGatewayPersistedState(); + public void testSetLastAcceptedStateTermChanged() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); - final String indexName = randomAlphaOfLength(10); - final int numberOfShards = randomIntBetween(1, 5); - final long version = randomNonNegativeLong(); - final long term = randomNonNegativeLong(); - final IndexMetaData indexMetaData = createIndexMetaData(indexName, numberOfShards, version); - final ClusterState state = createClusterState(randomNonNegativeLong(), + final String indexName = randomAlphaOfLength(10); + final int numberOfShards = randomIntBetween(1, 5); + final long version = randomNonNegativeLong(); + final long term = randomValueOtherThan(Long.MAX_VALUE, ESTestCase::randomNonNegativeLong); + final IndexMetaData indexMetaData = createIndexMetaData(indexName, numberOfShards, version); + final ClusterState state = createClusterState(randomNonNegativeLong(), MetaData.builder().coordinationMetaData(createCoordinationMetaData(term)).put(indexMetaData, false).build()); - gateway.setLastAcceptedState(state); + gateway.setLastAcceptedState(state); - gateway = maybeNew(gateway); - final long newTerm = randomValueOtherThan(term, ESTestCase::randomNonNegativeLong); - final int newNumberOfShards = randomValueOtherThan(numberOfShards, () -> randomIntBetween(1,5)); - final IndexMetaData newIndexMetaData = createIndexMetaData(indexName, newNumberOfShards, version); - final ClusterState newClusterState = createClusterState(randomNonNegativeLong(), + gateway = maybeNew(gateway); + final long newTerm = randomLongBetween(term + 1, Long.MAX_VALUE); + final int newNumberOfShards = randomValueOtherThan(numberOfShards, () -> randomIntBetween(1, 5)); + final IndexMetaData newIndexMetaData = createIndexMetaData(indexName, newNumberOfShards, version); + final ClusterState newClusterState = createClusterState(randomNonNegativeLong(), MetaData.builder().coordinationMetaData(createCoordinationMetaData(newTerm)).put(newIndexMetaData, false).build()); - 
gateway.setLastAcceptedState(newClusterState); + gateway.setLastAcceptedState(newClusterState); - gateway = maybeNew(gateway); - assertThat(gateway.getLastAcceptedState().metaData().index(indexName), equalTo(newIndexMetaData)); + gateway = maybeNew(gateway); + assertThat(gateway.getLastAcceptedState().metaData().index(indexName), equalTo(newIndexMetaData)); + } finally { + IOUtils.close(gateway); + } } - public void testCurrentTermAndTermAreDifferent() { - CoordinationState.PersistedState gateway = newGatewayPersistedState(); + public void testCurrentTermAndTermAreDifferent() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); - long currentTerm = randomNonNegativeLong(); - long term = randomValueOtherThan(currentTerm, ESTestCase::randomNonNegativeLong); + long currentTerm = randomNonNegativeLong(); + long term = randomValueOtherThan(currentTerm, ESTestCase::randomNonNegativeLong); - gateway.setCurrentTerm(currentTerm); - gateway.setLastAcceptedState(createClusterState(randomNonNegativeLong(), + gateway.setCurrentTerm(currentTerm); + gateway.setLastAcceptedState(createClusterState(randomNonNegativeLong(), MetaData.builder().coordinationMetaData(CoordinationMetaData.builder().term(term).build()).build())); - gateway = maybeNew(gateway); - assertThat(gateway.getCurrentTerm(), equalTo(currentTerm)); - assertThat(gateway.getLastAcceptedState().coordinationMetaData().term(), equalTo(term)); + gateway = maybeNew(gateway); + assertThat(gateway.getCurrentTerm(), equalTo(currentTerm)); + assertThat(gateway.getLastAcceptedState().coordinationMetaData().term(), equalTo(term)); + } finally { + IOUtils.close(gateway); + } + } + + public void testMarkAcceptedConfigAsCommitted() throws IOException { + CoordinationState.PersistedState gateway = null; + try { + gateway = newGatewayPersistedState(); + + // generate random coordinationMetaData with different lastAcceptedConfiguration and lastCommittedConfiguration + CoordinationMetaData coordinationMetaData; + do { + coordinationMetaData = createCoordinationMetaData(randomNonNegativeLong()); + } while (coordinationMetaData.getLastAcceptedConfiguration().equals(coordinationMetaData.getLastCommittedConfiguration())); + + ClusterState state = createClusterState(randomNonNegativeLong(), + MetaData.builder().coordinationMetaData(coordinationMetaData) + .clusterUUID(randomAlphaOfLength(10)).build()); + gateway.setLastAcceptedState(state); + + gateway = maybeNew(gateway); + assertThat(gateway.getLastAcceptedState().getLastAcceptedConfiguration(), + not(equalTo(gateway.getLastAcceptedState().getLastCommittedConfiguration()))); + gateway.markLastAcceptedStateAsCommitted(); + + CoordinationMetaData expectedCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) + .lastCommittedConfiguration(coordinationMetaData.getLastAcceptedConfiguration()).build(); + ClusterState expectedClusterState = + ClusterState.builder(state).metaData(MetaData.builder().coordinationMetaData(expectedCoordinationMetaData) + .clusterUUID(state.metaData().clusterUUID()).clusterUUIDCommitted(true).build()).build(); + + gateway = maybeNew(gateway); + assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState()); + gateway.markLastAcceptedStateAsCommitted(); + + gateway = maybeNew(gateway); + assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState()); + } finally { + IOUtils.close(gateway); + } } - public void testMarkAcceptedConfigAsCommitted() { - 
CoordinationState.PersistedState gateway = newGatewayPersistedState(); + public void testStatePersistedOnLoad() throws IOException { + // open LucenePersistedState to make sure that cluster state is written out to each data path + final PersistedClusterStateService persistedClusterStateService = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L); + final ClusterState state = createClusterState(randomNonNegativeLong(), + MetaData.builder().clusterUUID(randomAlphaOfLength(10)).build()); + try (GatewayMetaState.LucenePersistedState ignored = new GatewayMetaState.LucenePersistedState( + persistedClusterStateService, 42L, state)) { + + } + + nodeEnvironment.close(); + + // verify that the freshest state was rewritten to each data path + for (Path path : nodeEnvironment.nodeDataPaths()) { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_DATA_SETTING.getKey(), path.toString()).build(); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) { + final PersistedClusterStateService newPersistedClusterStateService = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L); + final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService.loadBestOnDiskState(); + assertFalse(onDiskState.empty()); + assertThat(onDiskState.currentTerm, equalTo(42L)); + assertClusterStateEqual(state, + ClusterState.builder(ClusterName.DEFAULT) + .version(onDiskState.lastAcceptedVersion) + .metaData(onDiskState.metaData).build()); + } + } + } + + public void testDataOnlyNodePersistence() throws Exception { + DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(), + Sets.newHashSet(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT); + Settings settings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName.value()).put( + Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_NAME_SETTING.getKey(), "test").build(); + final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode); + final TransportService transportService = mock(TransportService.class); + TestThreadPool threadPool = new TestThreadPool("testMarkAcceptedConfigAsCommittedOnDataOnlyNode"); + when(transportService.getThreadPool()).thenReturn(threadPool); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + final PersistedClusterStateService persistedClusterStateService = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L); + gateway.start(settings, transportService, clusterService, + new MetaStateService(nodeEnvironment, xContentRegistry()), null, null, persistedClusterStateService); + final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); + assertThat(persistedState, instanceOf(GatewayMetaState.AsyncLucenePersistedState.class)); //generate random coordinationMetaData with different lastAcceptedConfiguration and 
lastCommittedConfiguration CoordinationMetaData coordinationMetaData; @@ -218,26 +354,163 @@ public void testMarkAcceptedConfigAsCommitted() { } while (coordinationMetaData.getLastAcceptedConfiguration().equals(coordinationMetaData.getLastCommittedConfiguration())); ClusterState state = createClusterState(randomNonNegativeLong(), - MetaData.builder().coordinationMetaData(coordinationMetaData) - .clusterUUID(randomAlphaOfLength(10)).build()); - gateway.setLastAcceptedState(state); - - gateway = maybeNew(gateway); - assertThat(gateway.getLastAcceptedState().getLastAcceptedConfiguration(), - not(equalTo(gateway.getLastAcceptedState().getLastCommittedConfiguration()))); - gateway.markLastAcceptedStateAsCommitted(); + MetaData.builder().coordinationMetaData(coordinationMetaData) + .clusterUUID(randomAlphaOfLength(10)).build()); + persistedState.setLastAcceptedState(state); + assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten())); + + assertThat(persistedState.getLastAcceptedState().getLastAcceptedConfiguration(), + not(equalTo(persistedState.getLastAcceptedState().getLastCommittedConfiguration()))); + CoordinationMetaData persistedCoordinationMetaData = + persistedClusterStateService.loadBestOnDiskState().metaData.coordinationMetaData(); + assertThat(persistedCoordinationMetaData.getLastAcceptedConfiguration(), + equalTo(GatewayMetaState.AsyncLucenePersistedState.staleStateConfiguration)); + assertThat(persistedCoordinationMetaData.getLastCommittedConfiguration(), + equalTo(GatewayMetaState.AsyncLucenePersistedState.staleStateConfiguration)); + + persistedState.markLastAcceptedStateAsCommitted(); + assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten())); CoordinationMetaData expectedCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) - .lastCommittedConfiguration(coordinationMetaData.getLastAcceptedConfiguration()).build(); + .lastCommittedConfiguration(coordinationMetaData.getLastAcceptedConfiguration()).build(); ClusterState expectedClusterState = - ClusterState.builder(state).metaData(MetaData.builder().coordinationMetaData(expectedCoordinationMetaData) - .clusterUUID(state.metaData().clusterUUID()).clusterUUIDCommitted(true).build()).build(); + ClusterState.builder(state).metaData(MetaData.builder().coordinationMetaData(expectedCoordinationMetaData) + .clusterUUID(state.metaData().clusterUUID()).clusterUUIDCommitted(true).build()).build(); + + assertClusterStateEqual(expectedClusterState, persistedState.getLastAcceptedState()); + persistedCoordinationMetaData = persistedClusterStateService.loadBestOnDiskState().metaData.coordinationMetaData(); + assertThat(persistedCoordinationMetaData.getLastAcceptedConfiguration(), + equalTo(GatewayMetaState.AsyncLucenePersistedState.staleStateConfiguration)); + assertThat(persistedCoordinationMetaData.getLastCommittedConfiguration(), + equalTo(GatewayMetaState.AsyncLucenePersistedState.staleStateConfiguration)); + assertTrue(persistedClusterStateService.loadBestOnDiskState().metaData.clusterUUIDCommitted()); + + // generate a series of updates and check if batching works + final String indexName = randomAlphaOfLength(10); + long currentTerm = state.term(); + for (int i = 0; i < 1000; i++) { + if (rarely()) { + // bump term + currentTerm = currentTerm + (rarely() ? randomIntBetween(1, 5) : 0L); + persistedState.setCurrentTerm(currentTerm); + } else { + // update cluster state + final int numberOfShards = randomIntBetween(1, 5); + final long term = Math.min(state.term() + (rarely() ? 
randomIntBetween(1, 5) : 0L), currentTerm); + final IndexMetaData indexMetaData = createIndexMetaData(indexName, numberOfShards, i); + state = createClusterState(state.version() + 1, + MetaData.builder().coordinationMetaData(createCoordinationMetaData(term)).put(indexMetaData, false).build()); + persistedState.setLastAcceptedState(state); + } + } + assertEquals(currentTerm, persistedState.getCurrentTerm()); + assertClusterStateEqual(state, persistedState.getLastAcceptedState()); + assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten())); + + gateway.close(); + + try (CoordinationState.PersistedState reloadedPersistedState = newGatewayPersistedState()) { + assertEquals(currentTerm, reloadedPersistedState.getCurrentTerm()); + assertClusterStateEqual(GatewayMetaState.AsyncLucenePersistedState.resetVotingConfiguration(state), + reloadedPersistedState.getLastAcceptedState()); + assertNotNull(reloadedPersistedState.getLastAcceptedState().metaData().index(indexName)); + } + + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } - gateway = maybeNew(gateway); - assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState()); - gateway.markLastAcceptedStateAsCommitted(); + public void testStatePersistenceWithIOIssues() throws IOException { + final AtomicReference ioExceptionRate = new AtomicReference<>(0.01d); + final List list = new ArrayList<>(); + final PersistedClusterStateService persistedClusterStateService = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L) { + @Override + Directory createDirectory(Path path) { + final MockDirectoryWrapper wrapper = newMockFSDirectory(path); + wrapper.setAllowRandomFileNotFoundException(randomBoolean()); + wrapper.setRandomIOExceptionRate(ioExceptionRate.get()); + wrapper.setRandomIOExceptionRateOnOpen(ioExceptionRate.get()); + list.add(wrapper); + return wrapper; + } + }; + ClusterState state = createClusterState(randomNonNegativeLong(), + MetaData.builder().clusterUUID(randomAlphaOfLength(10)).build()); + long currentTerm = 42L; + try (GatewayMetaState.LucenePersistedState persistedState = new GatewayMetaState.LucenePersistedState( + persistedClusterStateService, currentTerm, state)) { + + try { + if (randomBoolean()) { + final ClusterState newState = createClusterState(randomNonNegativeLong(), + MetaData.builder().clusterUUID(randomAlphaOfLength(10)).build()); + persistedState.setLastAcceptedState(newState); + state = newState; + } else { + final long newTerm = currentTerm + 1; + persistedState.setCurrentTerm(newTerm); + currentTerm = newTerm; + } + } catch (IOError | Exception e) { + assertNotNull(ExceptionsHelper.unwrap(e, IOException.class)); + } + + ioExceptionRate.set(0.0d); + for (MockDirectoryWrapper wrapper : list) { + wrapper.setRandomIOExceptionRate(ioExceptionRate.get()); + wrapper.setRandomIOExceptionRateOnOpen(ioExceptionRate.get()); + } + + for (int i = 0; i < randomIntBetween(1, 5); i++) { + if (randomBoolean()) { + final long version = randomNonNegativeLong(); + final String indexName = randomAlphaOfLength(10); + final IndexMetaData indexMetaData = createIndexMetaData(indexName, randomIntBetween(1, 5), randomNonNegativeLong()); + final MetaData metaData = MetaData.builder(). + persistentSettings(Settings.builder().put(randomAlphaOfLength(10), randomAlphaOfLength(10)).build()). + coordinationMetaData(createCoordinationMetaData(1L)). + put(indexMetaData, false). 
+ build(); + state = createClusterState(version, metaData); + persistedState.setLastAcceptedState(state); + } else { + currentTerm += 1; + persistedState.setCurrentTerm(currentTerm); + } + } + + assertEquals(state, persistedState.getLastAcceptedState()); + assertEquals(currentTerm, persistedState.getCurrentTerm()); + + } catch (IOError | Exception e) { + if (ioExceptionRate.get() == 0.0d) { + throw e; + } + assertNotNull(ExceptionsHelper.unwrap(e, IOException.class)); + return; + } - gateway = maybeNew(gateway); - assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState()); + nodeEnvironment.close(); + + // verify that the freshest state was rewritten to each data path + for (Path path : nodeEnvironment.nodeDataPaths()) { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_DATA_SETTING.getKey(), path.toString()).build(); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) { + final PersistedClusterStateService newPersistedClusterStateService = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L); + final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService.loadBestOnDiskState(); + assertFalse(onDiskState.empty()); + assertThat(onDiskState.currentTerm, equalTo(currentTerm)); + assertClusterStateEqual(state, + ClusterState.builder(ClusterName.DEFAULT) + .version(onDiskState.lastAcceptedVersion) + .metaData(onDiskState.metaData).build()); + } + } } + } diff --git a/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java index 65432466c61a1..5903326551ca7 100644 --- a/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/IncrementalClusterStateWriterTests.java @@ -173,7 +173,7 @@ private IndexMetaData createIndexMetaData(String name) { public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() { IndexMetaData indexMetaData = createIndexMetaData("test"); Set indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithUnassignedIndex(indexMetaData, true)); - assertThat(indices.size(), equalTo(1)); + assertThat(indices.size(), equalTo(0)); } public void testGetRelevantIndicesWithUnassignedShardsOnDataOnlyNode() { @@ -443,12 +443,12 @@ public void testSlowLogging() throws WriteStateException, IllegalAccessException final long slowWriteLoggingThresholdMillis; final Settings settings; if (randomBoolean()) { - slowWriteLoggingThresholdMillis = IncrementalClusterStateWriter.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis(); + slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis(); settings = Settings.EMPTY; } else { slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000); settings = Settings.builder() - .put(IncrementalClusterStateWriter.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms") + .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms") .build(); } @@ -489,7 +489,7 @@ public void testSlowLogging() throws WriteStateException, IllegalAccessException "*")); 
clusterSettings.applySettings(Settings.builder() - .put(IncrementalClusterStateWriter.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms") + .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms") .build()); assertExpectedLogs(clusterState, incrementalClusterStateWriter, new MockLogAppender.SeenEventExpectation( "should see warning at reduced threshold", diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java index 2d629c9ca1ee2..6cadf896453c3 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java @@ -21,10 +21,11 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESIntegTestCase; @@ -55,8 +56,8 @@ public void testMetaWrittenAlsoOnDataNode() throws Exception { assertIndexInMetaState(masterNode, "test"); } - public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { - // this test checks that the index state is removed from a data only node once all shards have been allocated away from it + public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception { + // this test checks that the index data is removed from a data only node once all shards have been allocated away from it String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); List nodeNames= internalCluster().startDataOnlyNodes(2); String node1 = nodeNames.get(0); @@ -69,8 +70,10 @@ public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { ensureGreen(); assertIndexInMetaState(node1, index); Index resolveIndex = resolveIndex(index); + assertIndexDirectoryExists(node1, resolveIndex); assertIndexDirectoryDeleted(node2, resolveIndex); assertIndexInMetaState(masterNode, index); + assertIndexDirectoryDeleted(masterNode, resolveIndex); logger.debug("relocating index..."); client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder() @@ -79,7 +82,13 @@ public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { ensureGreen(); assertIndexDirectoryDeleted(node1, resolveIndex); assertIndexInMetaState(node2, index); + assertIndexDirectoryExists(node2, resolveIndex); assertIndexInMetaState(masterNode, index); + assertIndexDirectoryDeleted(masterNode, resolveIndex); + + client().admin().indices().prepareDelete(index).get(); + assertIndexDirectoryDeleted(node1, resolveIndex); + assertIndexDirectoryDeleted(node2, resolveIndex); } @SuppressWarnings("unchecked") @@ -156,17 +165,19 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { } protected void assertIndexDirectoryDeleted(final String nodeName, final Index index) throws Exception { - assertBusy(() -> { - logger.info("checking if index directory exists..."); - assertFalse("Expecting index directory of " + index + " to 
be deleted from node " + nodeName, - indexDirectoryExists(nodeName, index)); - } + assertBusy(() -> assertFalse("Expecting index directory of " + index + " to be deleted from node " + nodeName, + indexDirectoryExists(nodeName, index)) + ); + } + + protected void assertIndexDirectoryExists(final String nodeName, final Index index) throws Exception { + assertBusy(() -> assertTrue("Expecting index directory of " + index + " to exist on node " + nodeName, + indexDirectoryExists(nodeName, index)) ); } protected void assertIndexInMetaState(final String nodeName, final String indexName) throws Exception { assertBusy(() -> { - logger.info("checking if meta state exists..."); try { assertTrue("Expecting meta state of index " + indexName + " to be on node " + nodeName, getIndicesMetaDataOnNode(nodeName).containsKey(indexName)); @@ -190,8 +201,7 @@ private boolean indexDirectoryExists(String nodeName, Index index) { } private ImmutableOpenMap getIndicesMetaDataOnNode(String nodeName) { - GatewayMetaState nodeMetaState = ((InternalTestCluster) cluster()).getInstance(GatewayMetaState.class, nodeName); - MetaData nodeMetaData = nodeMetaState.getMetaData(); - return nodeMetaData.getIndices(); + final Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, nodeName); + return coordinator.getApplierState().getMetaData().getIndices(); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index 2f34cc4300d2d..97b29327b9d3b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -155,7 +155,7 @@ public void testLoadFullStateMissingGlobalMetaData() throws IOException { assertThat(loadedMetaData.index("test1"), equalTo(index)); } - public void testLoadFullStateAndUpdate() throws IOException { + public void testLoadFullStateAndUpdateAndClean() throws IOException { IndexMetaData index = indexMetaData("test1"); MetaData metaData = MetaData.builder() .persistentSettings(Settings.builder().put("test1", "value1").build()) @@ -201,5 +201,15 @@ public void testLoadFullStateAndUpdate() throws IOException { assertThat(loadedMetaData.persistentSettings(), equalTo(newMetaData.persistentSettings())); assertThat(loadedMetaData.hasIndex("test1"), equalTo(true)); assertThat(loadedMetaData.index("test1"), equalTo(index)); + + if (randomBoolean()) { + metaStateService.unreferenceAll(); + } else { + metaStateService.deleteAll(); + } + manifestAndMetaData = metaStateService.loadFullState(); + assertTrue(manifestAndMetaData.v1().isEmpty()); + metaData = manifestAndMetaData.v2(); + assertTrue(MetaData.isGlobalStateEquals(metaData, MetaData.EMPTY_META_DATA)); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java new file mode 100644 index 0000000000000..ef4b5f6f7ec9e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -0,0 +1,925 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gateway; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.CoordinationMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.gateway.PersistedClusterStateService.Writer; +import org.elasticsearch.index.Index; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.IOError; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.nullValue; + +public class PersistedClusterStateServiceTests extends ESTestCase { + + private PersistedClusterStateService newPersistedClusterStateService(NodeEnvironment nodeEnvironment) { + return new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), + usually() + ? 
BigArrays.NON_RECYCLING_INSTANCE + : new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L); + } + + public void testPersistsAndReloadsTerm() throws IOException { + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); + final long newTerm = randomNonNegativeLong(); + + assertThat(persistedClusterStateService.loadBestOnDiskState().currentTerm, equalTo(0L)); + try (Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(newTerm, ClusterState.EMPTY_STATE); + assertThat(persistedClusterStateService.loadBestOnDiskState().currentTerm, equalTo(newTerm)); + } + + assertThat(persistedClusterStateService.loadBestOnDiskState().currentTerm, equalTo(newTerm)); + } + } + + public void testPersistsAndReloadsGlobalMetadata() throws IOException { + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); + final String clusterUUID = UUIDs.randomBase64UUID(random()); + final long version = randomLongBetween(1L, Long.MAX_VALUE); + + ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + try (Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(clusterUUID) + .clusterUUIDCommitted(true) + .version(version)) + .incrementVersion().build()); + clusterState = loadPersistedClusterState(persistedClusterStateService); + assertThat(clusterState.metaData().clusterUUID(), equalTo(clusterUUID)); + assertTrue(clusterState.metaData().clusterUUIDCommitted()); + assertThat(clusterState.metaData().version(), equalTo(version)); + } + + try (Writer writer = persistedClusterStateService.createWriter()) { + writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(clusterUUID) + .clusterUUIDCommitted(true) + .version(version + 1)) + .incrementVersion().build()); + } + + clusterState = loadPersistedClusterState(persistedClusterStateService); + assertThat(clusterState.metaData().clusterUUID(), equalTo(clusterUUID)); + assertTrue(clusterState.metaData().clusterUUIDCommitted()); + assertThat(clusterState.metaData().version(), equalTo(version + 1)); + } + } + + private static void writeState(Writer writer, long currentTerm, ClusterState clusterState, + ClusterState previousState) throws IOException { + if (randomBoolean() || clusterState.term() != previousState.term() || writer.fullStateWritten == false) { + writer.writeFullStateAndCommit(currentTerm, clusterState); + } else { + writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState); + } + } + + public void testLoadsFreshestState() throws IOException { + final Path[] dataPaths = createDataPaths(); + final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE); + final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm); + final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE); + final long staleVersion = staleTerm == freshTerm ? 
randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE); + + final HashSet unimportantPaths = Arrays.stream(dataPaths).collect(Collectors.toCollection(HashSet::new)); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + writeState(writer, staleTerm, + ClusterState.builder(clusterState).version(staleVersion) + .metaData(MetaData.builder(clusterState.metaData()).coordinationMetaData( + CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(staleTerm).build())).build(), + clusterState); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(new Path[]{randomFrom(dataPaths)})) { + unimportantPaths.remove(nodeEnvironment.nodeDataPaths()[0]); + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writeState(writer, freshTerm, + ClusterState.builder(clusterState).version(freshVersion) + .metaData(MetaData.builder(clusterState.metaData()).coordinationMetaData( + CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(freshTerm).build())).build(), + clusterState); + } + } + + if (randomBoolean() && unimportantPaths.isEmpty() == false) { + IOUtils.rm(randomFrom(unimportantPaths)); + } + + // verify that the freshest state is chosen + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths)) { + final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment) + .loadBestOnDiskState(); + final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metaData); + assertThat(clusterState.term(), equalTo(freshTerm)); + assertThat(clusterState.version(), equalTo(freshVersion)); + } + } + + public void testFailsOnMismatchedNodeIds() throws IOException { + final Path[] dataPaths1 = createDataPaths(); + final Path[] dataPaths2 = createDataPaths(); + + final String[] nodeIds = new String[2]; + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) { + nodeIds[0] = nodeEnvironment.nodeId(); + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writer.writeFullStateAndCommit(0L, + ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build()); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) { + nodeIds[1] = nodeEnvironment.nodeId(); + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writer.writeFullStateAndCommit(0L, + ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build()); + } + } + + NodeMetaData.FORMAT.cleanupOldFiles(Long.MAX_VALUE, dataPaths2); + + final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new); + + final String failure = expectThrows(IllegalStateException.class, () -> newNodeEnvironment(combinedPaths)).getMessage(); + assertThat(failure, + allOf(containsString("unexpected node ID in 
metadata"), containsString(nodeIds[0]), containsString(nodeIds[1]))); + assertTrue("[" + failure + "] should match " + Arrays.toString(dataPaths2), + Arrays.stream(dataPaths2).anyMatch(p -> failure.contains(p.toString()))); + + // verify that loadBestOnDiskState has same check + final String message = expectThrows(IllegalStateException.class, + () -> new PersistedClusterStateService(combinedPaths, nodeIds[0], xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L, randomBoolean()).loadBestOnDiskState()).getMessage(); + assertThat(message, + allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1]))); + assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), + Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))); + } + + public void testFailsOnMismatchedCommittedClusterUUIDs() throws IOException { + final Path[] dataPaths1 = createDataPaths(); + final Path[] dataPaths2 = createDataPaths(); + final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new); + + final String clusterUUID1 = UUIDs.randomBase64UUID(random()); + final String clusterUUID2 = UUIDs.randomBase64UUID(random()); + + // first establish consistent node IDs and write initial metadata + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + assertFalse(clusterState.metaData().clusterUUIDCommitted()); + writer.writeFullStateAndCommit(0L, clusterState); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + assertFalse(clusterState.metaData().clusterUUIDCommitted()); + writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(clusterUUID1) + .clusterUUIDCommitted(true) + .version(1)) + .incrementVersion().build()); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + assertFalse(clusterState.metaData().clusterUUIDCommitted()); + writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(clusterUUID2) + .clusterUUIDCommitted(true) + .version(1)) + .incrementVersion().build()); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + final String message = expectThrows(IllegalStateException.class, + () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage(); + assertThat(message, + allOf(containsString("mismatched cluster UUIDs in metadata"), containsString(clusterUUID1), containsString(clusterUUID2))); + assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1), + Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString()))); + assertTrue("[" + message + "] 
should match " + Arrays.toString(dataPaths2), + Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))); + } + } + + public void testFailsIfFreshestStateIsInStaleTerm() throws IOException { + final Path[] dataPaths1 = createDataPaths(); + final Path[] dataPaths2 = createDataPaths(); + final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new); + + final long staleCurrentTerm = randomLongBetween(1L, Long.MAX_VALUE - 1); + final long freshCurrentTerm = randomLongBetween(staleCurrentTerm + 1, Long.MAX_VALUE); + + final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE); + final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm); + final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE); + final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + assertFalse(clusterState.metaData().clusterUUIDCommitted()); + writeState(writer, staleCurrentTerm, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()).version(1) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(staleTerm).build())) + .version(staleVersion) + .build(), + clusterState); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writeState(writer, freshCurrentTerm, clusterState, clusterState); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment) + .loadBestOnDiskState(); + final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metaData); + writeState(writer, onDiskState.currentTerm, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()).version(2) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(freshTerm).build())) + .version(freshVersion) + .build(), clusterState); + } + } + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + final String message = expectThrows(IllegalStateException.class, + () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage(); + assertThat(message, allOf( + containsString("inconsistent terms found"), + containsString(Long.toString(staleCurrentTerm)), + containsString(Long.toString(freshCurrentTerm)))); + assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1), + Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString()))); + assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), + Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString()))); + } + } + + public void testFailsGracefullyOnExceptionDuringFlush() throws IOException { + final AtomicBoolean 
throwException = new AtomicBoolean(); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService + = new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L) { + @Override + Directory createDirectory(Path path) throws IOException { + return new FilterDirectory(super.createDirectory(path)) { + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + if (throwException.get()) { + throw new IOException("simulated"); + } + return super.createOutput(name, context); + } + }; + } + }; + + try (Writer writer = persistedClusterStateService.createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + final long newTerm = randomNonNegativeLong(); + final ClusterState newState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(UUIDs.randomBase64UUID(random())) + .clusterUUIDCommitted(true) + .version(randomLongBetween(1L, Long.MAX_VALUE))) + .incrementVersion().build(); + throwException.set(true); + assertThat(expectThrows(IOException.class, () -> + writeState(writer, newTerm, newState, clusterState)).getMessage(), + containsString("simulated")); + } + } + } + + public void testClosesWriterOnFatalError() throws IOException { + final AtomicBoolean throwException = new AtomicBoolean(); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService + = new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L) { + @Override + Directory createDirectory(Path path) throws IOException { + return new FilterDirectory(super.createDirectory(path)) { + @Override + public void sync(Collection names) { + throw new OutOfMemoryError("simulated"); + } + }; + } + }; + + try (Writer writer = persistedClusterStateService.createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + final long newTerm = randomNonNegativeLong(); + final ClusterState newState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(UUIDs.randomBase64UUID(random())) + .clusterUUIDCommitted(true) + .version(randomLongBetween(1L, Long.MAX_VALUE))) + .incrementVersion().build(); + throwException.set(true); + assertThat(expectThrows(OutOfMemoryError.class, () -> { + if (randomBoolean()) { + writeState(writer, newTerm, newState, clusterState); + } else { + writer.commit(newTerm, newState.version()); + } + }).getMessage(), + containsString("simulated")); + assertFalse(writer.isOpen()); + } + + // check if we can open writer again + try (Writer ignored = persistedClusterStateService.createWriter()) { + + } + } + } + + public void testCrashesWithIOErrorOnCommitFailure() throws IOException { + final AtomicBoolean throwException = new AtomicBoolean(); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService + = new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L) { + @Override + Directory createDirectory(Path path) throws IOException { + return new FilterDirectory(super.createDirectory(path)) { + @Override + public void rename(String source, String dest) throws IOException { + if (throwException.get() && dest.startsWith("segments")) { + throw new IOException("simulated"); + } + } + }; + } + }; + + try (Writer writer = persistedClusterStateService.createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + final long newTerm = randomNonNegativeLong(); + final ClusterState newState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .clusterUUID(UUIDs.randomBase64UUID(random())) + .clusterUUIDCommitted(true) + .version(randomLongBetween(1L, Long.MAX_VALUE))) + .incrementVersion().build(); + throwException.set(true); + assertThat(expectThrows(IOError.class, () -> { + if (randomBoolean()) { + writeState(writer, newTerm, newState, clusterState); + } else { + writer.commit(newTerm, newState.version()); + } + }).getMessage(), + containsString("simulated")); + assertFalse(writer.isOpen()); + } + + // check if we can open writer again + try (Writer ignored = persistedClusterStateService.createWriter()) { + + } + } + } + + public void testFailsIfGlobalMetadataIsMissing() throws IOException { + // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe the global metadata + // isn't there any more + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writeState(writer, 0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), + clusterState); + } + + final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths()); + try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) { + final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(); + indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE); + try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) { + indexWriter.commit(); + } + } + + final String message = expectThrows(IllegalStateException.class, + () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage(); + assertThat(message, allOf(containsString("no global metadata found"), containsString(brokenPath.toString()))); + } + } + + public void testFailsIfGlobalMetadataIsDuplicated() throws IOException { + // if someone attempted surgery on the metadata index by hand, e.g. 
deleting broken segments, then maybe the global metadata + // is duplicated + + final Path[] dataPaths1 = createDataPaths(); + final Path[] dataPaths2 = createDataPaths(); + final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writeState(writer, 0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), + clusterState); + } + + final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths()); + final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths())); + try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); + Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) { + try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) { + indexWriter.addIndexes(dupDirectory); + indexWriter.commit(); + } + } + + final String message = expectThrows(IllegalStateException.class, + () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage(); + assertThat(message, allOf(containsString("duplicate global metadata found"), containsString(brokenPath.toString()))); + } + } + + public void testFailsIfIndexMetadataIsDuplicated() throws IOException { + // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe some index metadata + // is duplicated + + final Path[] dataPaths1 = createDataPaths(); + final Path[] dataPaths2 = createDataPaths(); + final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new); + + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) { + final String indexUUID = UUIDs.randomBase64UUID(random()); + final String indexName = randomAlphaOfLength(10); + + try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment)); + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .version(1L) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(1L).build()) + .put(IndexMetaData.builder(indexName) + .version(1L) + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, indexUUID)))) + .incrementVersion().build(), + clusterState); + } + + final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths()); + final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths())); + try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); + Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) { + try (IndexWriter indexWriter = new 
IndexWriter(directory, new IndexWriterConfig())) { + indexWriter.deleteDocuments(new Term("type", "global")); // do not duplicate global metadata + indexWriter.addIndexes(dupDirectory); + indexWriter.commit(); + } + } + + final String message = expectThrows(IllegalStateException.class, + () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage(); + assertThat(message, allOf( + containsString("duplicate metadata found"), + containsString(brokenPath.toString()), + containsString(indexName), + containsString(indexUUID))); + } + } + + public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws IOException { + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); + final long globalVersion = randomLongBetween(1L, Long.MAX_VALUE); + final String indexUUID = UUIDs.randomBase64UUID(random()); + final long indexMetaDataVersion = randomLongBetween(1L, Long.MAX_VALUE); + + final long oldTerm = randomLongBetween(1L, Long.MAX_VALUE - 1); + final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE); + + try (Writer writer = persistedClusterStateService.createWriter()) { + ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .version(globalVersion) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(oldTerm).build()) + .put(IndexMetaData.builder("test") + .version(indexMetaDataVersion - 1) // -1 because it's incremented in .put() + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, indexUUID)))) + .incrementVersion().build(), + clusterState); + + + clusterState = loadPersistedClusterState(persistedClusterStateService); + IndexMetaData indexMetaData = clusterState.metaData().index("test"); + assertThat(indexMetaData.getIndexUUID(), equalTo(indexUUID)); + assertThat(indexMetaData.getVersion(), equalTo(indexMetaDataVersion)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetaData.getSettings()), equalTo(0)); + // ensure we do not wastefully persist the same index metadata version by making a bad update with the same version + writer.writeIncrementalStateAndCommit(0L, clusterState, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder() + .put(indexMetaData.getSettings()) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)).build(), false)) + .incrementVersion().build()); + + clusterState = loadPersistedClusterState(persistedClusterStateService); + indexMetaData = clusterState.metaData().index("test"); + assertThat(indexMetaData.getIndexUUID(), equalTo(indexUUID)); + assertThat(indexMetaData.getVersion(), equalTo(indexMetaDataVersion)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetaData.getSettings()), equalTo(0)); + // ensure that we do persist the same index metadata version by making an update with a higher version + writeState(writer, 0L, ClusterState.builder(clusterState) + 
.metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder() + .put(indexMetaData.getSettings()) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)).build(), true)) + .incrementVersion().build(), + clusterState); + + clusterState = loadPersistedClusterState(persistedClusterStateService); + indexMetaData = clusterState.metaData().index("test"); + assertThat(indexMetaData.getVersion(), equalTo(indexMetaDataVersion + 1)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetaData.getSettings()), equalTo(2)); + // ensure that we also persist the index metadata when the term changes + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(newTerm).build()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder() + .put(indexMetaData.getSettings()) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 3)).build(), false)) + .incrementVersion().build(), + clusterState); + } + + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + final IndexMetaData indexMetaData = clusterState.metaData().index("test"); + assertThat(indexMetaData.getIndexUUID(), equalTo(indexUUID)); + assertThat(indexMetaData.getVersion(), equalTo(indexMetaDataVersion + 1)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetaData.getSettings()), equalTo(3)); + } + } + + public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOException { + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); + + final long term = randomLongBetween(1L, Long.MAX_VALUE); + final String addedIndexUuid = UUIDs.randomBase64UUID(random()); + final String updatedIndexUuid = UUIDs.randomBase64UUID(random()); + final String deletedIndexUuid = UUIDs.randomBase64UUID(random()); + + try (Writer writer = persistedClusterStateService.createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .version(clusterState.metaData().version() + 1) + .coordinationMetaData(CoordinationMetaData.builder(clusterState.coordinationMetaData()).term(term).build()) + .put(IndexMetaData.builder("updated") + .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, updatedIndexUuid))) + .put(IndexMetaData.builder("deleted") + .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, deletedIndexUuid)))) + .incrementVersion().build(), + clusterState); + } + + try (Writer writer = 
persistedClusterStateService.createWriter()) { + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + + assertThat(clusterState.metaData().indices().size(), equalTo(2)); + assertThat(clusterState.metaData().index("updated").getIndexUUID(), equalTo(updatedIndexUuid)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metaData().index("updated").getSettings()), + equalTo(1)); + assertThat(clusterState.metaData().index("deleted").getIndexUUID(), equalTo(deletedIndexUuid)); + + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .version(clusterState.metaData().version() + 1) + .remove("deleted") + .put(IndexMetaData.builder("updated") + .settings(Settings.builder() + .put(clusterState.metaData().index("updated").getSettings()) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2))) + .put(IndexMetaData.builder("added") + .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, addedIndexUuid)))) + .incrementVersion().build(), + clusterState); + } + + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + + assertThat(clusterState.metaData().indices().size(), equalTo(2)); + assertThat(clusterState.metaData().index("updated").getIndexUUID(), equalTo(updatedIndexUuid)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(clusterState.metaData().index("updated").getSettings()), + equalTo(2)); + assertThat(clusterState.metaData().index("added").getIndexUUID(), equalTo(addedIndexUuid)); + assertThat(clusterState.metaData().index("deleted"), nullValue()); + } + } + + public void testReloadsMetadataAcrossMultipleSegments() throws IOException { + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment); + + final int writes = between(5, 20); + final List indices = new ArrayList<>(writes); + + try (Writer writer = persistedClusterStateService.createWriter()) { + for (int i = 0; i < writes; i++) { + final Index index = new Index("test-" + i, UUIDs.randomBase64UUID(random())); + indices.add(index); + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + writeState(writer, 0L, ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .version(i + 2) + .put(IndexMetaData.builder(index.getName()) + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())))) + .incrementVersion().build(), + clusterState); + } + } + + final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService); + for (Index index : indices) { + final IndexMetaData indexMetaData = clusterState.metaData().index(index.getName()); + assertThat(indexMetaData.getIndexUUID(), equalTo(index.getUUID())); + } + } + } + + @TestLogging(value = "org.elasticsearch.gateway:WARN", 
reason = "to ensure that we log gateway events on WARN level") + public void testSlowLogging() throws IOException, IllegalAccessException { + final long slowWriteLoggingThresholdMillis; + final Settings settings; + if (randomBoolean()) { + slowWriteLoggingThresholdMillis = PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.get(Settings.EMPTY).millis(); + settings = Settings.EMPTY; + } else { + slowWriteLoggingThresholdMillis = randomLongBetween(2, 100000); + settings = Settings.builder() + .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), slowWriteLoggingThresholdMillis + "ms") + .build(); + } + + final DiscoveryNode localNode = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT); + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId())).build(); + + final long startTimeMillis = randomLongBetween(0L, Long.MAX_VALUE - slowWriteLoggingThresholdMillis * 10); + final AtomicLong currentTime = new AtomicLong(startTimeMillis); + final AtomicLong writeDurationMillis = new AtomicLong(slowWriteLoggingThresholdMillis); + + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) { + PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(nodeEnvironment, + xContentRegistry(), + usually() + ? BigArrays.NON_RECYCLING_INSTANCE + : new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), + clusterSettings, + () -> currentTime.getAndAdd(writeDurationMillis.get())); + + try (Writer writer = persistedClusterStateService.createWriter()) { + assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation( + "should see warning at threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "writing cluster state took [*] which is above the warn threshold of [*]; " + + "wrote full state with [0] indices")); + + writeDurationMillis.set(randomLongBetween(slowWriteLoggingThresholdMillis, slowWriteLoggingThresholdMillis * 2)); + assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation( + "should see warning above threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "writing cluster state took [*] which is above the warn threshold of [*]; " + + "wrote full state with [0] indices")); + + writeDurationMillis.set(randomLongBetween(1, slowWriteLoggingThresholdMillis - 1)); + assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.UnseenEventExpectation( + "should not see warning below threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "*")); + + clusterSettings.applySettings(Settings.builder() + .put(PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD.getKey(), writeDurationMillis.get() + "ms") + .build()); + assertExpectedLogs(1L, null, clusterState, writer, new MockLogAppender.SeenEventExpectation( + "should see warning at reduced threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "writing cluster state took [*] which is above the warn threshold of [*]; " + + "wrote full state with [0] indices")); + + final ClusterState newClusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + 
.version(clusterState.version()) + .put(IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, "test-uuid")))) + .incrementVersion().build(); + + assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.SeenEventExpectation( + "should see warning at threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "writing cluster state took [*] which is above the warn threshold of [*]; " + + "wrote global metadata [false] and metadata for [1] indices and skipped [0] unchanged indices")); + + writeDurationMillis.set(randomLongBetween(1, writeDurationMillis.get() - 1)); + assertExpectedLogs(1L, clusterState, newClusterState, writer, new MockLogAppender.UnseenEventExpectation( + "should not see warning below threshold", + PersistedClusterStateService.class.getCanonicalName(), + Level.WARN, + "*")); + + assertThat(currentTime.get(), lessThan(startTimeMillis + 14 * slowWriteLoggingThresholdMillis)); // ensure no overflow + } + } + } + + private void assertExpectedLogs(long currentTerm, ClusterState previousState, ClusterState clusterState, + PersistedClusterStateService.Writer writer, MockLogAppender.LoggingExpectation expectation) + throws IllegalAccessException, IOException { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + mockAppender.addExpectation(expectation); + Logger classLogger = LogManager.getLogger(PersistedClusterStateService.class); + Loggers.addAppender(classLogger, mockAppender); + + try { + if (previousState == null) { + writer.writeFullStateAndCommit(currentTerm, clusterState); + } else { + writer.writeIncrementalStateAndCommit(currentTerm, previousState, clusterState); + } + } finally { + Loggers.removeAppender(classLogger, mockAppender); + mockAppender.stop(); + } + mockAppender.assertAllExpectationsMatched(); + } + + @Override + public Settings buildEnvSettings(Settings settings) { + assertTrue(settings.hasValue(Environment.PATH_DATA_SETTING.getKey())); + return Settings.builder() + .put(settings) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()).build(); + } + + public static Path[] createDataPaths() { + final Path[] dataPaths = new Path[randomIntBetween(1, 4)]; + for (int i = 0; i < dataPaths.length; i++) { + dataPaths[i] = createTempDir(); + } + return dataPaths; + } + + private NodeEnvironment newNodeEnvironment(Path[] dataPaths) throws IOException { + return newNodeEnvironment(Settings.builder() + .putList(Environment.PATH_DATA_SETTING.getKey(), Arrays.stream(dataPaths).map(Path::toString).collect(Collectors.toList())) + .build()); + } + + private static ClusterState loadPersistedClusterState(PersistedClusterStateService persistedClusterStateService) throws IOException { + final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState(); + return clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metaData); + } + + private static ClusterState clusterStateFromMetadata(long version, MetaData metaData) { + return ClusterState.builder(ClusterName.DEFAULT).version(version).metaData(metaData).build(); + } + + +} diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java 
b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 9868adfe3b86b..cf0c766972e09 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.gateway; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -150,10 +149,6 @@ public void testRecentPrimaryInformation() throws Exception { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100)) .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList())); - assertBusy(() -> { - SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get(); - assertThat(syncedFlushResponse.successfulShards(), equalTo(2)); - }); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica)); if (randomBoolean()) { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100)) @@ -357,10 +352,11 @@ public void testPeerRecoveryForClosedIndices() throws Exception { assertNoOpRecoveries(indexName); } - private void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + public static void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + final ClusterService clusterService = internalCluster().clusterService(); assertBusy(() -> { Index index = resolveIndex(indexName); - Set activeRetentionLeaseIds = clusterService().state().routingTable().index(index).shard(0).shards().stream() + Set activeRetentionLeaseIds = clusterService.state().routingTable().index(index).shard(0).shards().stream() .map(shardRouting -> ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting.currentNodeId())) .collect(Collectors.toSet()); for (String node : internalCluster().nodesInclude(indexName)) { diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index adeb49faa8941..2c11b26580f3d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; @@ -442,7 +443,7 @@ public Analyzer get() { final Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { - throw new AssertionError("should not be here"); + return new TokenStreamComponents(new StandardTokenizer()); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index f7df2ee97f932..4a156db4f2101 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -20,12 +20,13 @@ package org.elasticsearch.index.analysis; import 
com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -108,8 +109,8 @@ public void testOverrideDefaultAnalyzer() throws IOException { public void testOverrideDefaultAnalyzerWithoutAnalysisModeAll() throws IOException { Version version = VersionUtils.randomVersion(random()); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(IndexSettingsModule.newIndexSettings("index", settings), - "my_filter", Settings.EMPTY) { + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", settings); + TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, "my_filter", Settings.EMPTY) { @Override public AnalysisMode getAnalysisMode() { return randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME); @@ -117,10 +118,16 @@ public AnalysisMode getAnalysisMode() { @Override public TokenStream create(TokenStream tokenStream) { - return null; + return tokenStream; } }; - Analyzer analyzer = new CustomAnalyzer(null, new CharFilterFactory[0], new TokenFilterFactory[] { tokenFilter }); + TokenizerFactory tokenizer = new AbstractTokenizerFactory(indexSettings, Settings.EMPTY, "my_tokenizer") { + @Override + public Tokenizer create() { + return new StandardTokenizer(); + } + }; + Analyzer analyzer = new CustomAnalyzer(tokenizer, new CharFilterFactory[0], new TokenFilterFactory[] { tokenFilter }); MapperException ex = expectThrows(MapperException.class, () -> emptyRegistry.build(IndexSettingsModule.newIndexSettings("index", settings), singletonMap("default", new PreBuiltAnalyzerProvider("default", AnalyzerScope.INDEX, analyzer)), emptyMap(), @@ -264,4 +271,122 @@ public void testEnsureCloseInvocationProperlyDelegated() throws IOException { registry.close(); verify(mock).close(); } + + public void testDeprecationsAndExceptions() throws IOException { + + AnalysisPlugin plugin = new AnalysisPlugin() { + + class MockFactory extends AbstractTokenFilterFactory { + MockFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().equals(Version.CURRENT)) { + deprecationLogger.deprecated("Using deprecated token filter [deprecated]"); + } + return tokenStream; + } + } + + class ExceptionFactory extends AbstractTokenFilterFactory { + + ExceptionFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().equals(Version.CURRENT)) { + throw new IllegalArgumentException("Cannot use token filter [exception]"); + } + return tokenStream; + } + } + + class UnusedMockFactory extends AbstractTokenFilterFactory { + UnusedMockFactory(IndexSettings indexSettings, Environment env, String name, 
Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + deprecationLogger.deprecated("Using deprecated token filter [unused]"); + return tokenStream; + } + } + + class NormalizerFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory { + + NormalizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + deprecationLogger.deprecated("Using deprecated token filter [deprecated_normalizer]"); + return tokenStream; + } + + } + + @Override + public Map> getTokenFilters() { + return Map.of("deprecated", MockFactory::new, "unused", UnusedMockFactory::new, + "deprecated_normalizer", NormalizerFactory::new, "exception", ExceptionFactory::new); + } + }; + + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.deprecated.type", "deprecated") + .put("index.analysis.analyzer.custom.tokenizer", "standard") + .putList("index.analysis.analyzer.custom.filter", "lowercase", "deprecated") + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + new AnalysisModule(TestEnvironment.newEnvironment(settings), + singletonList(plugin)).getAnalysisRegistry().build(idxSettings); + + // We should only get a warning from the token filter that is referenced in settings + assertWarnings("Using deprecated token filter [deprecated]"); + + indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.getPreviousVersion()) + .put("index.analysis.filter.deprecated.type", "deprecated_normalizer") + .putList("index.analysis.normalizer.custom.filter", "lowercase", "deprecated_normalizer") + .put("index.analysis.filter.deprecated.type", "deprecated") + .put("index.analysis.filter.exception.type", "exception") + .put("index.analysis.analyzer.custom.tokenizer", "standard") + // exception will not throw because we're not on Version.CURRENT + .putList("index.analysis.analyzer.custom.filter", "lowercase", "deprecated", "exception") + .build(); + idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + new AnalysisModule(TestEnvironment.newEnvironment(settings), + singletonList(plugin)).getAnalysisRegistry().build(idxSettings); + + // We should only get a warning from the normalizer, because we're on a version where 'deprecated' + // works fine + assertWarnings("Using deprecated token filter [deprecated_normalizer]"); + + indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.exception.type", "exception") + .put("index.analysis.analyzer.custom.tokenizer", "standard") + // exception will not throw because we're not on Version.LATEST + .putList("index.analysis.analyzer.custom.filter", "lowercase", "exception") + .build(); + IndexSettings exceptionSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + new AnalysisModule(TestEnvironment.newEnvironment(settings), + singletonList(plugin)).getAnalysisRegistry().build(exceptionSettings); + }); + assertEquals("Cannot use token filter [exception]", 
e.getMessage()); + + } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java new file mode 100644 index 0000000000000..8c4c62487c21c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.TokenFilter; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; + +public class PreConfiguredTokenFilterTests extends ESTestCase { + + private final Settings emptyNodeSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + public void testCachingWithSingleton() throws IOException { + PreConfiguredTokenFilter pctf = + PreConfiguredTokenFilter.singleton("singleton", randomBoolean(), + (tokenStream) -> new TokenFilter(tokenStream) { + @Override + public boolean incrementToken() { + return false; + } + }); + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY); + + Version version1 = VersionUtils.randomVersion(random()); + Settings settings1 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version1) + .build(); + TokenFilterFactory tff_v1_1 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "singleton", settings1); + TokenFilterFactory tff_v1_2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "singleton", settings1); + assertSame(tff_v1_1, tff_v1_2); + + Version version2 = randomValueOtherThan(version1, () -> randomFrom(VersionUtils.allVersions())); + Settings settings2 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version2) + .build(); + + TokenFilterFactory tff_v2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "singleton", settings2); + assertSame(tff_v1_1, tff_v2); + } + + public void testCachingWithElasticsearchVersion() throws IOException { + PreConfiguredTokenFilter pctf = + PreConfiguredTokenFilter.elasticsearchVersion("elasticsearch_version", randomBoolean(), + (tokenStream, esVersion) -> new TokenFilter(tokenStream) { + @Override + public boolean incrementToken() { + return false; + } + }); + + IndexSettings 
indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY); + + Version version1 = VersionUtils.randomVersion(random()); + Settings settings1 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version1) + .build(); + TokenFilterFactory tff_v1_1 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "elasticsearch_version", settings1); + TokenFilterFactory tff_v1_2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "elasticsearch_version", settings1); + assertSame(tff_v1_1, tff_v1_2); + + Version version2 = randomValueOtherThan(version1, () -> randomFrom(VersionUtils.allVersions())); + Settings settings2 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version2) + .build(); + + TokenFilterFactory tff_v2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "elasticsearch_version", settings2); + assertNotSame(tff_v1_1, tff_v2); + } + + public void testCachingWithLuceneVersion() throws IOException { + PreConfiguredTokenFilter pctf = + PreConfiguredTokenFilter.luceneVersion("lucene_version", randomBoolean(), + (tokenStream, luceneVersion) -> new TokenFilter(tokenStream) { + @Override + public boolean incrementToken() { + return false; + } + }); + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY); + + Version version1 = Version.CURRENT; + Settings settings1 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version1) + .build(); + TokenFilterFactory tff_v1_1 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "lucene_version", settings1); + TokenFilterFactory tff_v1_2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "lucene_version", settings1); + assertSame(tff_v1_1, tff_v1_2); + + byte major = VersionUtils.getFirstVersion().major; + Version version2 = Version.fromString(major - 1 + ".0.0"); + Settings settings2 = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version2) + .build(); + + TokenFilterFactory tff_v2 = + pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "lucene_version", settings2); + assertNotSame(tff_v1_1, tff_v2); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index 1fd3f51ece916..d8712f582a5e5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -180,7 +180,7 @@ public void testParsesBooleansNestedStrict() throws IOException { public void testMultiFields() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "boolean") @@ -192,7 +192,7 @@ public void testMultiFields() throws IOException { .endObject().endObject() .endObject().endObject()); DocumentMapper mapper = indexService.mapperService() - .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + .merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder() 
.startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java index 6805db662821f..445611aba40fb 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java @@ -232,7 +232,7 @@ public void testCopyToDynamicInnerInnerObjectParsing() throws Exception { } public void testCopyToStrictDynamicInnerObjectParsing() throws Exception { - String mapping = Strings.toString(jsonBuilder().startObject().startObject("type1") + String mapping = Strings.toString(jsonBuilder().startObject().startObject("_doc") .field("dynamic", "strict") .startObject("properties") .startObject("copy_test") @@ -243,7 +243,7 @@ public void testCopyToStrictDynamicInnerObjectParsing() throws Exception { .endObject().endObject()); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() - .parse("type1", new CompressedXContent(mapping)); + .parse("_doc", new CompressedXContent(mapping)); BytesReference json = BytesReference.bytes(jsonBuilder().startObject() .field("copy_test", "foo") @@ -253,7 +253,7 @@ public void testCopyToStrictDynamicInnerObjectParsing() throws Exception { docMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); fail(); } catch (MapperParsingException ex) { - assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [very] within [type1] is not allowed")); + assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [very] within [_doc] is not allowed")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 477cf7de8285c..e7b2b90832ae6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -62,11 +62,11 @@ protected Collection> getPlugins() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -91,11 +91,11 @@ public void testDefaults() throws Exception { } public void testNotIndexed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date").field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -113,11 +113,11 @@ public void testNotIndexed() throws Exception { } public void testNoDocValues() throws Exception { - String mapping = 
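The mapper-test hunks above and below all apply the same mechanical rewrite: the root object of the mapping, the type name passed to parse()/merge(), and any expected error messages move from an arbitrary type name ("type", "type1", "person") to the single mapping type "_doc". Reduced to the JSON those builders produce, the shape change looks as follows; this is a plain-Java illustration with a hypothetical rootName helper, not code from the patch.

public final class SingleTypeMappingShape {
    // Hypothetical helper: extracts the root key of a one-key JSON object literal.
    static String rootName(String mappingJson) {
        int start = mappingJson.indexOf('"') + 1;
        return mappingJson.substring(start, mappingJson.indexOf('"', start));
    }

    public static void main(String[] args) {
        // Old style: root object named after a user-chosen type; new style: always "_doc".
        String before = "{\"type\":{\"properties\":{\"field\":{\"type\":\"date\"}}}}";
        String after = "{\"_doc\":{\"properties\":{\"field\":{\"type\":\"date\"}}}}";
        System.out.println(rootName(before)); // type
        System.out.println(rootName(after));  // _doc
    }
}

The name handed to the DocumentMapperParser has to agree with that root key, which is why each hunk changes both the jsonBuilder().startObject(...) call and the parse(...)/merge(...) argument together.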
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date").field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -135,11 +135,11 @@ public void testNoDocValues() throws Exception { } public void testStore() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date").field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -169,11 +169,11 @@ public void testIgnoreMalformed() throws IOException { } private void testIgnoreMalfomedForValue(String value, String expectedException) throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -206,12 +206,12 @@ private void testIgnoreMalfomedForValue(String value, String expectedException) } public void testChangeFormat() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date") .field("format", "epoch_second").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -230,13 +230,13 @@ public void testChangeFormat() throws IOException { public void testChangeLocale() throws IOException { assumeTrue("need java 9 for testing ",JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "date") .field("format", "E, d MMM yyyy HH:mm:ss Z") .field("locale", "de") .endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -250,7 +250,7 @@ public void 
testChangeLocale() throws IOException { public void testNullValue() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "date") @@ -258,7 +258,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -270,7 +270,7 @@ public void testNullValue() throws IOException { assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "date") @@ -279,7 +279,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -332,7 +332,7 @@ public void testTimeZoneParsing() throws Exception { final String timeZonePattern = "yyyy-MM-dd" + randomFrom("XXX", "[XXX]", "'['XXX']'"); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "date") @@ -341,7 +341,7 @@ public void testTimeZoneParsing() throws Exception { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); DateFormatter formatter = DateFormatter.forPattern(timeZonePattern); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperParserTests.java index c35e083d687b1..b16ecb9a1591b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperParserTests.java @@ -25,17 +25,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.test.ESSingleNodeTestCase; -import static org.hamcrest.Matchers.equalTo; - public class DocumentMapperParserTests extends ESSingleNodeTestCase { - public void testTypeLevel() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .endObject().endObject()); - - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - assertThat(mapper.type(), equalTo("type")); - } public void testFieldNameWithDots() throws Exception { IndexService indexService = createIndex("test"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 724c53d04f15b..8a14c1fbcca36 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -155,7 +155,7 @@ public void testChangeSearchAnalyzerToDefault() throws Exception { public void testConcurrentMergeTest() throws Throwable { final MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("_doc", new CompressedXContent("{\"_doc\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); final DocumentMapper documentMapper = mapperService.documentMapper(); DocumentFieldMappers dfm = documentMapper.mappers(); @@ -184,7 +184,7 @@ public void run() { Mapping update = doc.dynamicMappingsUpdate(); assert update != null; lastIntroducedFieldName.set(fieldName); - mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("_doc", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE); } } catch (Exception e) { error.set(e); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index e430125f28187..1d76efdd008b4 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -549,7 +549,7 @@ public void testDynamicStrictLongArray() throws Exception { .endArray().endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } public void testMappedGeoPointArray() throws Exception { @@ -631,7 +631,7 @@ public void testDynamicStrictObject() throws Exception { .endObject().endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } public void testDynamicFalseValue() throws Exception { @@ -662,7 +662,7 @@ public void testDynamicStrictValue() throws Exception { .endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [bar] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [bar] within [_doc] is not allowed", exception.getMessage()); } public void testDynamicFalseNull() throws Exception { @@ -693,7 +693,7 @@ public void testDynamicStrictNull() throws Exception { .endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set 
to strict, dynamic introduction of [bar] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [bar] within [_doc] is not allowed", exception.getMessage()); } public void testMappedNullValue() throws Exception { @@ -892,7 +892,7 @@ public void testDynamicStrictDottedFieldNameLongArray() throws Exception { .endArray().endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } public void testDynamicDottedFieldNameLong() throws Exception { @@ -1009,7 +1009,7 @@ public void testDynamicStrictDottedFieldNameLong() throws Exception { .endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } public void testDynamicDottedFieldNameObject() throws Exception { @@ -1136,7 +1136,7 @@ public void testDynamicStrictDottedFieldNameObject() throws Exception { .endObject().endObject()); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON))); - assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); + assertEquals("mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed", exception.getMessage()); } public void testDocumentContainsMetadataField() throws Exception { @@ -1175,7 +1175,7 @@ public void testParseToJsonAndParse() throws Exception { DocumentMapper docMapper = parser.parse("person", new CompressedXContent(mapping)); String builtMapping = docMapper.mappingSource().string(); // reparse it - DocumentMapper builtDocMapper = parser.parse("person", new CompressedXContent(builtMapping)); + DocumentMapper builtDocMapper = parser.parse("_doc", new CompressedXContent(builtMapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); Document doc = builtDocMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1"))); @@ -1213,7 +1213,7 @@ public void testAttributes() throws Exception { assertThat((String) docMapper.meta().get("param1"), equalTo("value1")); String builtMapping = docMapper.mappingSource().string(); - DocumentMapper builtDocMapper = parser.parse("person", new CompressedXContent(builtMapping)); + DocumentMapper builtDocMapper = parser.parse("_doc", new CompressedXContent(builtMapping)); assertThat((String) builtDocMapper.meta().get("param1"), equalTo("value1")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 9e0d9155d43bd..bb0a0af1b64a3 100644 --- 
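The strict-dynamic assertions above keep the exact message format and only change the parent named in it from [type] to [_doc]. A self-contained sketch of the pattern the tests now expect, where buildStrictError is a hypothetical stand-in for how the real StrictDynamicMappingException message is built:

public final class StrictDynamicMessageExample {
    // Hypothetical stand-in for the exception's message construction.
    static String buildStrictError(String field, String parent) {
        return "mapping set to strict, dynamic introduction of [" + field + "] within [" + parent + "] is not allowed";
    }

    public static void main(String[] args) {
        String expected = "mapping set to strict, dynamic introduction of [foo] within [_doc] is not allowed";
        if (!expected.equals(buildStrictError("foo", "_doc"))) {
            throw new AssertionError("unexpected message format");
        }
        System.out.println(expected);
    }
}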
a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -123,7 +123,7 @@ public void testDynamicStrict() throws IOException { .field("field2", "value2") .endObject()), XContentType.JSON))); - assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [type] is not allowed")); + assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [_doc] is not allowed")); e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse(new SourceToParse("test", "1", @@ -133,7 +133,7 @@ public void testDynamicStrict() throws IOException { .field("field2", (String) null) .endObject()), XContentType.JSON))); - assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [type] is not allowed")); + assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [_doc] is not allowed")); } public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOException { @@ -234,10 +234,10 @@ public void testField() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type").endObject() + .startObject("_doc").endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()); @@ -245,7 +245,7 @@ public void testField() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("foo") .field("type", "text") .startObject("fields") @@ -263,11 +263,11 @@ public void testIncremental() throws Exception { DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); // Make sure that mapping updates are incremental, this is important for performance otherwise // every new field introduction runs in linear time with the total number of fields - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("foo").field("type", "text").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar") @@ -276,7 +276,7 @@ public void testIncremental() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + 
assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") // foo is NOT in the update .startObject("bar").field("type", "text") .startObject("fields") @@ -293,10 +293,10 @@ public void testIntroduceTwoFields() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type").endObject() + .startObject("_doc").endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar") @@ -305,7 +305,7 @@ public void testIntroduceTwoFields() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("bar").field("type", "text") .startObject("fields") .startObject("keyword") @@ -329,10 +329,10 @@ public void testObject() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type").endObject() + .startObject("_doc").endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar") @@ -341,7 +341,7 @@ public void testObject() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz") .field("type", "text") .startObject("fields").startObject("keyword").field("type", "keyword") @@ -354,10 +354,10 @@ public void testArray() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type").endObject() + .startObject("_doc").endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject() @@ -366,7 +366,7 @@ public void testArray() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - 
assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("foo") .field("type", "text") .startObject("fields") @@ -382,11 +382,11 @@ public void testArray() throws Exception { public void testInnerDynamicMapping() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("foo").field("type", "object").endObject() .endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo") @@ -395,7 +395,7 @@ public void testInnerDynamicMapping() throws Exception { // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("foo").startObject("properties").startObject("bar").startObject("properties") .startObject("baz").field("type", "text").startObject("fields") .startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject() @@ -407,10 +407,10 @@ public void testComplexArray() throws Exception { IndexService indexService = createIndex("test"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type").endObject() + .startObject("_doc").endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo") @@ -418,7 +418,7 @@ public void testComplexArray() throws Exception { .startObject().field("baz", 3).endObject() .endArray().endObject()); assertEquals(mapping, serialize(mapper)); - assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("foo").startObject("properties") .startObject("bar").field("type", "text") .startObject("fields") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperTests.java index 9f87ad3d0390a..e09953d31abc8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperTests.java @@ -43,7 +43,7 @@ public void setup() { public void testParsing() throws 
IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("alias-field") .field("type", "alias") @@ -55,7 +55,7 @@ public void testParsing() throws IOException { .endObject() .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index a34adaca2f144..1adc12548c090 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -60,11 +60,11 @@ protected Collection> getPlugins() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -89,11 +89,11 @@ public void testDefaults() throws Exception { } public void testNotIndexed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip").field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -111,11 +111,11 @@ public void testNotIndexed() throws Exception { } public void testNoDocValues() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip").field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -134,11 +134,11 @@ public void testNoDocValues() throws Exception { } public void testStore() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip").field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); 
assertEquals(mapping, mapper.mappingSource().toString()); @@ -162,11 +162,11 @@ public void testStore() throws Exception { } public void testIgnoreMalformed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -199,7 +199,7 @@ public void testIgnoreMalformed() throws Exception { public void testNullValue() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "ip") @@ -207,7 +207,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -219,7 +219,7 @@ public void testNullValue() throws IOException { assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "ip") @@ -228,7 +228,7 @@ public void testNullValue() throws IOException { .endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); doc = mapper.parse(new SourceToParse("test", "1", BytesReference diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java index 2f861ca6fc79c..0c4b11004b460 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java @@ -48,11 +48,11 @@ public void setup() { } public void testStoreCidr() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "ip_range") .field("store", true); mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); final Map cases = new HashMap<>(); cases.put("192.168.0.0/15", "192.169.255.255"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index fd821f1170ce8..c2063d1478a9b 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -94,11 +94,11 @@ public void setup() { } public void testDefaults() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -131,11 +131,11 @@ public void testDefaults() throws Exception { } public void testIgnoreAbove() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("ignore_above", 5).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -161,11 +161,11 @@ public void testIgnoreAbove() throws IOException { } public void testNullValue() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -176,11 +176,11 @@ public void testNullValue() throws IOException { XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("null_value", "uri").endObject().endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -206,11 +206,11 @@ public void testNullValue() throws IOException { } public void testEnableStore() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new 
CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -227,11 +227,11 @@ public void testEnableStore() throws IOException { } public void testDisableIndex() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -249,11 +249,11 @@ public void testDisableIndex() throws IOException { } public void testDisableDocValues() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -270,12 +270,12 @@ public void testDisableDocValues() throws IOException { } public void testIndexOptions() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword") .field("index_options", "freqs").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -302,17 +302,17 @@ public void testIndexOptions() throws IOException { } public void testBoost() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); } public void testEnableNorms() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "keyword") @@ -322,7 +322,7 @@ public void testEnableNorms() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc 
= mapper.parse(new SourceToParse("test", "1", BytesReference @@ -341,12 +341,12 @@ public void testEnableNorms() throws IOException { } public void testNormalizer() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "keyword").field("normalizer", "my_lowercase").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapperTests.java index d53eb0bcac134..df6b43d041acf 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapperTests.java @@ -53,7 +53,7 @@ protected Collection> getPlugins() { } public void testDefaultConfiguration() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("location") .field("type", "geo_shape") .field("strategy", "recursive") @@ -61,7 +61,7 @@ public void testDefaultConfiguration() throws IOException { .endObject().endObject()); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() - .parse("type1", new CompressedXContent(mapping)); + .parse("_doc", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(LegacyGeoShapeFieldMapper.class)); assertEquals(mapping, defaultMapper.mappingSource().toString()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java index f32608b53c79b..d6dfe8054f4b5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java @@ -186,7 +186,7 @@ public void testMultiFieldsInConsistentOrder() throws Exception { XContentHelper.convertToMap(docMapper.mappingSource().compressedReference(), true, builder.contentType()).v2(); @SuppressWarnings("unchecked") Map multiFields = - (Map) XContentMapValues.extractValue("type.properties.my_field.fields", sourceAsMap); + (Map) XContentMapValues.extractValue("_doc.properties.my_field.fields", sourceAsMap); assertThat(multiFields.size(), equalTo(multiFieldNames.length)); int i = 0; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index a1322c6614456..3181a92434121 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -53,11 +53,11 @@ protected void setTypeList() { @Override public void doTestDefaults(String type) throws Exception { - String mapping = 
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -81,11 +81,11 @@ public void doTestDefaults(String type) throws Exception { @Override public void doTestNotIndexed(String type) throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -104,11 +104,11 @@ public void doTestNotIndexed(String type) throws Exception { @Override public void doTestNoDocValues(String type) throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -128,11 +128,11 @@ public void doTestNoDocValues(String type) throws Exception { @Override public void doTestStore(String type) throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -157,11 +157,11 @@ public void doTestStore(String type) throws Exception { @Override public void doTestCoerce(String type) throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -180,11 +180,11 @@ public void doTestCoerce(String type) throws IOException { IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); - 
mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("coerce", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper2 = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper2.mappingSource().toString()); @@ -200,11 +200,11 @@ public void doTestCoerce(String type) throws IOException { @Override protected void doTestDecimalCoerce(String type) throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -223,10 +223,10 @@ protected void doTestDecimalCoerce(String type) throws IOException { public void testIgnoreMalformed() throws Exception { for (String type : TYPES) { for (Object malformedValue : new Object[] { "a", Boolean.FALSE }) { - String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties") + String mapping = Strings.toString(jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("field").field("type", type).endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -240,10 +240,10 @@ public void testIgnoreMalformed() throws Exception { assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors")); } - mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field") + mapping = Strings.toString(jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field") .field("type", type).field("ignore_malformed", true).endObject().endObject().endObject().endObject()); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper2 = parser.parse("_doc", new CompressedXContent(mapping)); ParsedDocument doc = mapper2.parse(new SourceToParse("test", "1", BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), XContentType.JSON)); @@ -268,9 +268,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws }; for (Boolean ignoreMalformed : new Boolean[] { true, false }) { String mapping = Strings.toString( - jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field").field("type", type) + jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field").field("type", type) .field("ignore_malformed", ignoreMalformed).endObject().endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = 
parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); MapperParsingException e = expectThrows(MapperParsingException.class, @@ -287,7 +287,7 @@ public void testRejectNorms() throws IOException { // not supported as of 5.0 for (String type : TYPES) { DocumentMapperParser parser = createIndex("index-" + type).mapperService().documentMapperParser(); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("foo") .field("type", type) @@ -295,7 +295,7 @@ public void testRejectNorms() throws IOException { .endObject() .endObject().endObject().endObject()); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> parser.parse("type", new CompressedXContent(mapping))); + () -> parser.parse("_doc", new CompressedXContent(mapping))); assertThat(e.getMessage(), containsString("Mapping definition for [foo] has unsupported parameters: [norms")); } } @@ -322,7 +322,7 @@ public void testRejectIndexOptions() throws IOException { @Override protected void doTestNullValue(String type) throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", type) @@ -330,7 +330,7 @@ protected void doTestNullValue(String type) throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference @@ -348,7 +348,7 @@ protected void doTestNullValue(String type) throws IOException { missing = 123L; } mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", type) @@ -357,7 +357,7 @@ protected void doTestNullValue(String type) throws IOException { .endObject() .endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); doc = mapper.parse(new SourceToParse("test", "1", BytesReference diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 7672eb3df26b6..154ebf30f5984 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -181,24 +181,6 @@ public void testMerge() throws IOException { assertEquals(Dynamic.STRICT, mapper.root().dynamic()); } - public void testEmptyName() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("") - .startObject("properties") - .startObject("name") - .field("type", "text") - .endObject() - .endObject() - .endObject().endObject()); - - // Empty name not allowed in index created after 5.0 - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - createIndex("test").mapperService().documentMapperParser().parse("", new 
CompressedXContent(mapping)); - }); - assertThat(e.getMessage(), containsString("name cannot be empty string")); - } - @Override protected Collection> getPlugins() { return pluginList(InternalSettingsPlugin.class); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index d713988e19197..4c8b7c3794280 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -111,14 +111,14 @@ private Object getMax(String type) { @Override public void doTestDefaults(String type) throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(XContentFactory.jsonBuilder() @@ -142,14 +142,14 @@ public void doTestDefaults(String type) throws Exception { @Override protected void doTestNotIndexed(String type) throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("index", false); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(XContentFactory.jsonBuilder() @@ -167,13 +167,13 @@ protected void doTestNotIndexed(String type) throws Exception { @Override protected void doTestNoDocValues(String type) throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("doc_values", false); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(XContentFactory.jsonBuilder() @@ -193,13 +193,13 @@ protected void doTestNoDocValues(String type) throws 
Exception { @Override protected void doTestStore(String type) throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("store", true); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(XContentFactory.jsonBuilder() @@ -231,13 +231,13 @@ protected void doTestStore(String type) throws Exception { @Override public void doTestCoerce(String type) throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); @@ -260,9 +260,9 @@ public void doTestCoerce(String type) throws IOException { // date_range ignores the coerce parameter and epoch_millis date format truncates floats (see issue: #14641) if (type.equals("date_range") == false) { - mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field") + mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field") .field("type", type).field("coerce", false).endObject().endObject().endObject().endObject(); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper2 = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper2.mappingSource().toString()); @@ -279,11 +279,11 @@ public void doTestCoerce(String type) throws IOException { @Override protected void doTestDecimalCoerce(String type) throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type); mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); @@ -313,14 +313,14 @@ protected void doTestDecimalCoerce(String type) throws IOException { @Override protected void doTestNullValue(String type) throws IOException { - XContentBuilder mapping = 
XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("store", true); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); // test null value for min and max @@ -383,14 +383,14 @@ public void testNoBounds() throws Exception { } public void doTestNoBounds(String type) throws IOException { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", type).field("store", true); if (type.equals("date_range")) { mapping = mapping.field("format", DATE_FORMAT); } mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(Strings.toString(mapping))); assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); // test no bounds specified diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java index 0b805eb726646..8946d7a235420 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java @@ -32,99 +32,99 @@ public class RootObjectMapperTests extends ESSingleNodeTestCase { public void testNumericDetection() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("numeric_detection", false) .endObject() .endObject()); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value String mapping2 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("numeric_detection", true) .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change String mapping3 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, 
mapper.mappingSource().toString()); } public void testDateDetection() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("date_detection", true) .endObject() .endObject()); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value String mapping2 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("date_detection", false) .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change String mapping3 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); } public void testDateFormatters() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("dynamic_date_formats", Arrays.asList("yyyy-MM-dd")) .endObject() .endObject()); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // no update if formatters are not set explicitly String mapping2 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); String mapping3 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("dynamic_date_formats", Arrays.asList()) .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } public void testDynamicTemplates() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .startArray("dynamic_templates") .startObject() .startObject("my_template") @@ -138,25 +138,25 @@ public void testDynamicTemplates() throws Exception { .endObject() .endObject()); MapperService mapperService = 
createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // no update if templates are not set explicitly String mapping2 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); String mapping3 = Strings.toString(XContentFactory.jsonBuilder() .startObject() - .startObject("type") + .startObject("_doc") .field("dynamic_templates", Arrays.asList()) .endObject() .endObject()); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); + mapper = mapperService.merge("_doc", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index cbfbb4a05c973..25c59fb26ddb1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -121,13 +121,13 @@ public void testExcludes() throws Exception { } private void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... 
conflicts) throws IOException { - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1)); - docMapper = parser.parse("type", docMapper.mappingSource()); + DocumentMapper docMapper = parser.parse("_doc", new CompressedXContent(mapping1)); + docMapper = parser.parse("_doc", docMapper.mappingSource()); if (conflicts.length == 0) { - docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping()); + docMapper.merge(parser.parse("_doc", new CompressedXContent(mapping2)).mapping()); } else { try { - docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping()); + docMapper.merge(parser.parse("_doc", new CompressedXContent(mapping2)).mapping()); fail(); } catch (IllegalArgumentException e) { for (String conflict : conflicts) { @@ -140,14 +140,14 @@ private void assertConflicts(String mapping1, String mapping2, DocumentMapperPar public void testEnabledNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); // using default of true - String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()); + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").field("enabled", false).endObject() .endObject().endObject()); assertConflicts(mapping1, mapping2, parser, "Cannot update enabled setting for [_source]"); // not changing is ok - String mapping3 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping3 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").field("enabled", true).endObject() .endObject().endObject()); assertConflicts(mapping1, mapping3, parser); @@ -155,14 +155,14 @@ public void testEnabledNotUpdateable() throws Exception { public void testIncludesNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()); + String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").array("includes", "foo.*").endObject() .endObject().endObject()); assertConflicts(defaultMapping, mapping1, parser, "Cannot update includes setting for [_source]"); assertConflicts(mapping1, defaultMapping, parser, "Cannot update includes setting for [_source]"); - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").array("includes", "foo.*", "bar.*").endObject() .endObject().endObject()); assertConflicts(mapping1, mapping2, parser, "Cannot update includes setting for [_source]"); @@ -173,14 +173,14 @@ public void testIncludesNotUpdateable() throws Exception { public void 
testExcludesNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()); + String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").array("excludes", "foo.*").endObject() .endObject().endObject()); assertConflicts(defaultMapping, mapping1, parser, "Cannot update excludes setting for [_source]"); assertConflicts(mapping1, defaultMapping, parser, "Cannot update excludes setting for [_source]"); - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("_source").array("excludes", "foo.*", "bar.*").endObject() .endObject().endObject()); assertConflicts(mapping1, mapping2, parser, "Cannot update excludes setting for [_source]"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 539d8bbdeaf59..6d73ed8cfb1bc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -106,11 +106,11 @@ protected Collection> getPlugins() { } public void testDefaults() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -138,11 +138,11 @@ public void testDefaults() throws IOException { } public void testEnableStore() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").field("store", true).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -159,11 +159,11 @@ public void testEnableStore() throws IOException { } public void testDisableIndex() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").field("index", false).endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); 
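// A minimal sketch, assuming the DocumentMapperParser field `parser` that these test classes
// already obtain via createIndex("test").mapperService().documentMapperParser(), of the
// typeless-mapping pattern the mapper-test hunks above and below all converge on: mappings are
// built and parsed under the reserved "_doc" name instead of an arbitrary custom type name.
// The helper name parseTypelessTextMapping is hypothetical; Strings, XContentFactory,
// CompressedXContent and DocumentMapper are the same classes used verbatim in the surrounding
// tests (imports assumed from org.elasticsearch.common.*, org.elasticsearch.common.compress.*,
// org.elasticsearch.common.xcontent.* and org.elasticsearch.index.mapper.*).
private DocumentMapper parseTypelessTextMapping(DocumentMapperParser parser) throws IOException {
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject("_doc")                                  // reserved, typeless mapping name
                .startObject("properties")
                    .startObject("field").field("type", "text").endObject()
                .endObject()
            .endObject()
        .endObject());
    // Parse under the same "_doc" name; the tests then assert that the parsed mapping
    // serializes back to exactly the JSON it was built from.
    return parser.parse("_doc", new CompressedXContent(mapping));
}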
+ DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -179,14 +179,14 @@ public void testDisableIndex() throws IOException { } public void testDisableNorms() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("norms", false) .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); @@ -234,11 +234,11 @@ public void testIndexOptions() throws IOException { } public void testDefaultPositionIncrementGap() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = indexService.mapperService().merge("type", + DocumentMapper mapper = indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); @@ -273,14 +273,14 @@ public void testDefaultPositionIncrementGap() throws IOException { public void testPositionIncrementGap() throws IOException { final int positionIncrementGap = randomIntBetween(1, 1000); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("position_increment_gap", positionIncrementGap) .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = indexService.mapperService().merge("type", + DocumentMapper mapper = indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); @@ -314,7 +314,7 @@ public void testPositionIncrementGap() throws IOException { } public void testSearchAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -323,11 +323,11 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -336,10 +336,10 @@ public void testSearchAnalyzerSerialization() throws 
IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -347,11 +347,11 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -360,17 +360,17 @@ public void testSearchAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") .field("analyzer", "keyword") .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); @@ -384,7 +384,7 @@ public void testSearchAnalyzerSerialization() throws IOException { } public void testSearchQuoteAnalyzerSerialization() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -394,11 +394,11 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index/search analyzer - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ -408,7 +408,7 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { .endObject() .endObject().endObject().endObject()); - mapper = parser.parse("type", new CompressedXContent(mapping)); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); } @@ -488,14 +488,14 @@ public void testTermVectors() throws IOException { } public void 
testEagerGlobalOrdinals() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("eager_global_ordinals", true) .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); @@ -503,13 +503,13 @@ public void testEagerGlobalOrdinals() throws IOException { } public void testFielddata() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .endObject().endObject() .endObject().endObject()); - DocumentMapper disabledMapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper disabledMapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, disabledMapper.mappingSource().toString()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { FieldMapper fieldMapper = (FieldMapper) disabledMapper.mappers().getMapper("field"); @@ -517,14 +517,14 @@ public void testFielddata() throws IOException { }); assertThat(e.getMessage(), containsString("Fielddata is disabled")); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("fielddata", true) .endObject().endObject() .endObject().endObject()); - DocumentMapper enabledMapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper enabledMapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, enabledMapper.mappingSource().toString()); @@ -544,7 +544,7 @@ public void testFielddata() throws IOException { } public void testFrequencyFilter() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("fielddata", true) @@ -555,7 +555,7 @@ public void testFrequencyFilter() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); TextFieldMapper fieldMapper = (TextFieldMapper) mapper.mappers().getMapper("field"); @@ -778,7 +778,7 @@ public void testFastPhraseMapping() throws IOException { throw new UnsupportedOperationException(); }, null); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties") .startObject("field") .field("type", "text") @@ 
-793,10 +793,10 @@ public void testFastPhraseMapping() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - queryShardContext.getMapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + queryShardContext.getMapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); Query q = new MatchPhraseQueryBuilder("field", "two words").toQuery(queryShardContext); assertThat(q, is(new PhraseQuery("field._index_phrase", "two words"))); @@ -862,7 +862,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } { - String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("index", "false") @@ -870,13 +870,13 @@ protected TokenStreamComponents createComponents(String fieldName) { .endObject().endObject() .endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> parser.parse("type", new CompressedXContent(badConfigMapping)) + () -> parser.parse("_doc", new CompressedXContent(badConfigMapping)) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on unindexed field [field]")); } { - String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String badConfigMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("index_options", "freqs") @@ -884,7 +884,7 @@ protected TokenStreamComponents createComponents(String fieldName) { .endObject().endObject() .endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> parser.parse("type", new CompressedXContent(badConfigMapping)) + () -> parser.parse("_doc", new CompressedXContent(badConfigMapping)) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on field [field] if positions are not enabled")); } @@ -893,7 +893,7 @@ protected TokenStreamComponents createComponents(String fieldName) { public void testIndexPrefixMapping() throws IOException { { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field") .field("type", "text") .field("analyzer", "standard") @@ -904,7 +904,7 @@ public void testIndexPrefixMapping() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:10")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 1ef68cf42d166..689dfbdcc872d 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -75,7 +75,7 @@ public static void testDocValues(Function createIndex) thr assertTrue(values.advanceExact(0)); assertEquals(0, values.nextOrd()); assertEquals(SortedSetDocValues.NO_MORE_ORDS, values.nextOrd()); - assertEquals(new BytesRef("type"), values.lookupOrd(0)); + assertEquals(new BytesRef("_doc"), values.lookupOrd(0)); r.close(); dir.close(); } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/test/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 03842d11b1a40..1d1455348e051 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -42,6 +42,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/48701") public void testCanRecoverFromStoreWithoutPeerRecoveryRetentionLease() throws Exception { /* * In a full cluster restart from a version without peer-recovery retention leases, the leases on disk will not include a lease for diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index b3cee099984c8..fa1c070b28277 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -56,6 +56,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; @@ -155,8 +156,8 @@ public void testCorruptIndex() throws Exception { fail("expected the command to fail as node is locked"); } catch (Exception e) { assertThat(e.getMessage(), - allOf(containsString("Failed to lock node's directory"), - containsString("is Elasticsearch still running ?"))); + allOf(containsString("failed to lock node's directory"), + containsString("is Elasticsearch still running?"))); } final Path indexDir = getPathToShardData(indexName, ShardPath.INDEX_FOLDER_NAME); @@ -478,6 +479,9 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { final Settings node1PathSettings = internalCluster().dataPathSettings(node1); final Settings node2PathSettings = internalCluster().dataPathSettings(node2); + assertBusy(() -> internalCluster().getInstances(GatewayMetaState.class) + .forEach(gw -> assertTrue(gw.allPendingAsyncStatesWritten()))); + // stop data nodes internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); @@ -573,8 +577,8 @@ public void testResolvePath() throws Exception { for (String nodeName : nodeNames) { final Path indexPath = indexPathByNodeName.get(nodeName); final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString()); - command.findAndProcessShardPath(options, environmentByNodeName.get(nodeName), - shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); + command.findAndProcessShardPath(options, 
environmentByNodeName.get(nodeName), environmentByNodeName.get(nodeName).dataFiles(), + state, shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index df7423fc8577e..c16e0262b3a3b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -30,19 +30,26 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.engine.EngineException; @@ -60,6 +67,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; +import java.util.Objects; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -80,7 +88,9 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase { private Environment environment; private ShardPath shardPath; private IndexMetaData indexMetaData; + private ClusterState clusterState; private IndexShard indexShard; + private Path[] dataPaths; private Path translogPath; private Path indexPath; @@ -89,7 +99,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase { @Before public void setup() throws IOException { - shardId = new ShardId("index0", "_na_", 0); + shardId = new ShardId("index0", UUIDs.randomBase64UUID(), 0); final String nodeId = randomAlphaOfLength(10); routing = TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); @@ -103,11 +113,13 @@ public void setup() throws IOException { // create same directory structure as prod does Files.createDirectories(dataDir); + dataPaths = new Path[] {dataDir}; final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); final NodeEnvironment.NodePath nodePath = new 
NodeEnvironment.NodePath(dataDir); @@ -135,6 +147,17 @@ public void setup() throws IOException { .putMapping("{ \"properties\": {} }"); indexMetaData = metaData.build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder().put(indexMetaData, false).build()).build(); + + try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) { + final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); + try (PersistedClusterStateService.Writer writer = new PersistedClusterStateService(dataPaths, nodeId, + xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L, true).createWriter()) { + writer.writeFullStateAndCommit(1L, clusterState); + } + } + indexShard = newStartedShard(p -> newShard(routing, shardPath, indexMetaData, null, null, new InternalEngineFactory(), () -> { }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER), true); @@ -355,7 +378,6 @@ public void testResolveIndexDirectory() throws Exception { // index a single doc to have files on a disk indexDoc(indexShard, "_doc", "0", "{}"); flushShard(indexShard, true); - writeIndexState(); // close shard closeShards(indexShard); @@ -367,11 +389,11 @@ public void testResolveIndexDirectory() throws Exception { final OptionSet options = parser.parse("--index", shardId.getIndex().getName(), "--shard-id", Integer.toString(shardId.id())); - command.findAndProcessShardPath(options, environment, + command.findAndProcessShardPath(options, environment, dataPaths, clusterState, shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); final OptionSet options2 = parser.parse("--dir", indexPath.toAbsolutePath().toString()); - command.findAndProcessShardPath(options2, environment, + command.findAndProcessShardPath(options2, environment, dataPaths, clusterState, shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); } @@ -509,17 +531,7 @@ private int indexDocs(IndexShard indexShard, boolean flushLast) throws IOExcepti logger.info("--> indexed {} docs, {} to keep", numDocs, numDocsToKeep); - writeIndexState(); return numDocsToKeep; } - private void writeIndexState() throws IOException { - // create _state of IndexMetaData - try(NodeEnvironment nodeEnvironment = new NodeEnvironment(environment.settings(), environment)) { - final Path[] paths = nodeEnvironment.indexPaths(indexMetaData.getIndex()); - IndexMetaData.FORMAT.writeAndCleanup(indexMetaData, paths); - logger.info("--> index metadata persisted to {} ", Arrays.toString(paths)); - } - } - } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index 342ac746b9d31..11684445dcee8 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -52,14 +52,11 @@ public void testCurrentHeaderVersion() throws Exception { } final TranslogCorruptedException mismatchUUID = expectThrows(TranslogCorruptedException.class, () -> { try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { - TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel); + TranslogHeader.read(randomValueOtherThan(translogUUID, UUIDs::randomBase64UUID), translogFile, channel); } }); assertThat(mismatchUUID.getMessage(), 
containsString("this translog file belongs to a different translog")); - int corruptions = between(1, 10); - for (int i = 0; i < corruptions && Files.size(translogFile) > 0; i++) { - TestTranslog.corruptFile(logger, random(), translogFile, false); - } + TestTranslog.corruptFile(logger, random(), translogFile, false); final TranslogCorruptedException corruption = expectThrows(TranslogCorruptedException.class, () -> { try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { TranslogHeader.read(randomBoolean() ? outHeader.getTranslogUUID() : UUIDs.randomBase64UUID(), translogFile, channel); diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 5a4fc6e07a873..9d7a77583f48d 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -432,19 +432,18 @@ protected long getShardWritingBytes(IndexShard shard) { } }; int iterations = randomIntBetween(10, 100); + ThreadPoolStats.Stats beforeStats = getRefreshThreadPoolStats(); for (int i = 0; i < iterations; i++) { controller.forceCheck(); } assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.getQueue(), equalTo(0)); - assertThat(stats.getActive(), equalTo(1)); + assertThat(stats.getCompleted(), equalTo(beforeStats.getCompleted() + iterations - 1)); }); refreshLatch.get().countDown(); // allow refresh assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.getQueue(), equalTo(0)); - assertThat(stats.getActive(), equalTo(0)); + assertThat(stats.getCompleted(), equalTo(beforeStats.getCompleted() + iterations)); }); assertThat(shard.refreshStats().getTotal(), equalTo(refreshStats.getTotal() + 1)); closeShards(shard); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 436126930e3d1..b3fdd53ee8f15 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -122,7 +122,7 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem }; indicesService.removeIndex(idx, DELETED, "simon says"); try { - IndexService index = indicesService.createIndex(metaData, Arrays.asList(countingListener)); + IndexService index = indicesService.createIndex(metaData, Arrays.asList(countingListener), false); assertEquals(3, counter.get()); idx = index.index(); ShardRouting newRouting = shardRouting; diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index bd35f0f1783ca..ea41f14a53af3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -223,13 +223,11 @@ public void testDeleteIndexStore() throws Exception { ClusterService clusterService = getInstanceFromNode(ClusterService.class); IndexMetaData firstMetaData = clusterService.state().metaData().index("test"); assertTrue(test.hasShard(0)); + ShardPath firstPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new 
ShardId(test.index(), 0), + test.getIndexSettings().customDataPath()); - try { - indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); - fail(); - } catch (IllegalStateException ex) { - // all good - } + expectThrows(IllegalStateException.class, () -> indicesService.deleteIndexStore("boom", firstMetaData)); + assertTrue(firstPath.exists()); GatewayMetaState gwMetaState = getInstanceFromNode(GatewayMetaState.class); MetaData meta = gwMetaState.getMetaData(); @@ -237,37 +235,25 @@ public void testDeleteIndexStore() throws Exception { assertNotNull(meta.index("test")); assertAcked(client().admin().indices().prepareDelete("test")); + assertFalse(firstPath.exists()); + meta = gwMetaState.getMetaData(); assertNotNull(meta); assertNull(meta.index("test")); - test = createIndex("test"); client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().admin().indices().prepareFlush("test").get(); assertHitCount(client().prepareSearch("test").get(), 1); IndexMetaData secondMetaData = clusterService.state().metaData().index("test"); assertAcked(client().admin().indices().prepareClose("test")); - ShardPath path = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), + ShardPath secondPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings().customDataPath()); - assertTrue(path.exists()); + assertTrue(secondPath.exists()); - try { - indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state()); - fail(); - } catch (IllegalStateException ex) { - // all good - } - - assertTrue(path.exists()); + expectThrows(IllegalStateException.class, () -> indicesService.deleteIndexStore("boom", secondMetaData)); + assertTrue(secondPath.exists()); - // now delete the old one and make sure we resolve against the name - try { - indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); - fail(); - } catch (IllegalStateException ex) { - // all good - } assertAcked(client().admin().indices().prepareOpen("test")); ensureGreen("test"); } @@ -563,7 +549,7 @@ public void testGetEngineFactory() throws IOException { .numberOfShards(1) .numberOfReplicas(0) .build(); - final IndexService indexService = indicesService.createIndex(indexMetaData, Collections.emptyList()); + final IndexService indexService = indicesService.createIndex(indexMetaData, Collections.emptyList(), false); if (value != null && value) { assertThat(indexService.getEngineFactory(), instanceOf(FooEnginePlugin.FooEngineFactory.class)); } else { @@ -589,7 +575,7 @@ public void testConflictingEngineFactories() { final IndicesService indicesService = getIndicesService(); final IllegalStateException e = - expectThrows(IllegalStateException.class, () -> indicesService.createIndex(indexMetaData, Collections.emptyList())); + expectThrows(IllegalStateException.class, () -> indicesService.createIndex(indexMetaData, Collections.emptyList(), false)); final String pattern = ".*multiple engine factories provided for \\[foobar/.*\\]: \\[.*FooEngineFactory\\],\\[.*BarEngineFactory\\].*"; assertThat(e, hasToString(new RegexMatcher(pattern))); diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 675305a17cce6..3c8455224e646 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ 
b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -228,14 +228,13 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } - public void testStandardFilterBWC() throws IOException { + public void testStandardFilterBWC() { Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); final Settings settings = Settings.builder().put("index.analysis.analyzer.my_standard.tokenizer", "standard") .put("index.analysis.analyzer.my_standard.filter", "standard") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).put(IndexMetaData.SETTING_VERSION_CREATED, version) .build(); - IndexAnalyzers analyzers = getIndexAnalyzers(settings); - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> analyzers.get("my_standard").tokenStream("", "")); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(settings)); assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed.")); } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 80cf443e5007e..d7b94e26eff9e 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -195,7 +195,8 @@ protected class MockIndicesService implements AllocatedIndices buildInIndexListener) throws IOException { + List buildInIndexListener, + boolean writeDanglingIndices) throws IOException { MockIndexService indexService = new MockIndexService(new IndexSettings(indexMetaData, Settings.EMPTY)); indices = Maps.copyMapWithAddedEntry(indices, indexMetaData.getIndexUUID(), indexService); return indexService; diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 74b94ad3a16b4..901409739c1d6 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -110,6 +110,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyList; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -163,7 +164,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th // MetaDataCreateIndexService creates indices using its IndicesService instance to check mappings -> fake it here try { @SuppressWarnings("unchecked") final List listeners = anyList(); - when(indicesService.createIndex(any(IndexMetaData.class), listeners)) + when(indicesService.createIndex(any(IndexMetaData.class), listeners, anyBoolean())) .then(invocationOnMock -> { IndexService indexService = mock(IndexService.class); IndexMetaData indexMetaData = (IndexMetaData)invocationOnMock.getArguments()[0]; diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 
71af6ac7f040f..4fc8f4395e56c 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -514,7 +514,6 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod null, null, null, - null, primaryReplicaSyncer, RetentionLeaseSyncer.EMPTY, client) { diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 5b4b80aab8c57..ed442fd9cb51a 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -18,63 +18,34 @@ */ package org.elasticsearch.indices.flush; -import org.apache.lucene.index.Term; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; -import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.engine.InternalEngineTests; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardTestCase; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexingMemoryController; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static 
org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class FlushIT extends ESIntegTestCase { @@ -136,253 +107,6 @@ public void testRejectIllegalFlushParameters() { .actionGet().getShardFailures(), emptyArray()); } - public void testSyncedFlush() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get(); - ensureGreen(); - - final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - ShardsSyncedFlushResult result; - if (randomBoolean()) { - logger.info("--> sync flushing shard 0"); - result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0)); - } else { - logger.info("--> sync flushing index [test]"); - SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); - result = indicesResult.getShardsResultPerIndex().get("test").get(0); - } - assertFalse(result.failed()); - assertThat(result.totalShards(), equalTo(indexStats.getShards().length)); - assertThat(result.successfulShards(), equalTo(indexStats.getShards().length)); - - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - String syncId = result.syncId(); - for (ShardStats shardStats : indexStats.getShards()) { - final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID); - assertThat(shardSyncId, equalTo(syncId)); - } - - // now, start new node and relocate a shard there and see if sync id still there - String newNodeName = internalCluster().startNode(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next(); - String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName(); - assertFalse(currentNodeName.equals(newNodeName)); - internalCluster().client().admin().cluster().prepareReroute() - .add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get(); - - client().admin().cluster().prepareHealth() - .setWaitForNoRelocatingShards(true) - .get(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()).get(); - ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()).get(); - ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - 
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - - public void testSyncedFlushWithConcurrentIndexing() throws Exception { - - internalCluster().ensureAtLeastNumDataNodes(3); - createIndex("test"); - - client().admin().indices().prepareUpdateSettings("test").setSettings( - Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .put("index.refresh_interval", -1) - .put("index.number_of_replicas", internalCluster().numDataNodes() - 1)) - .get(); - ensureGreen(); - final AtomicBoolean stop = new AtomicBoolean(false); - final AtomicInteger numDocs = new AtomicInteger(0); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (stop.get() == false) { - client().prepareIndex().setIndex("test").setSource("{}", XContentType.JSON).get(); - numDocs.incrementAndGet(); - } - } - }; - indexingThread.start(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - logger.info("--> trying sync flush"); - SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get(); - logger.info("--> sync flush done"); - stop.set(true); - indexingThread.join(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test")); - refresh(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - logger.info("indexed {} docs", client().prepareSearch().setSize(0).get().getHits().getTotalHits().value); - logClusterState(); - internalCluster().fullRestart(); - ensureGreen(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - } - - private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List syncedFlushResults) { - - for (final ShardStats shardStats : shardsStats) { - for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) { - if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) { - for (Map.Entry singleResponse : - shardResult.shardResponses().entrySet()) { - if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) { - if (singleResponse.getValue().success()) { - logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId()); - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } else { - logger.info("{} sync flush failed for on node {}", singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId()); - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - } - } - } - } - } - - public void testUnallocatedShardsDoesNotHang() throws InterruptedException { - // create an index but disallow allocation - prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).setSettings(Settings.builder() - .put("index.routing.allocation.include._name", "nonexistent")).get(); - - // this should not hang but instead immediately return with empty result set - List shardsResult = client().admin().indices().prepareSyncedFlush("test").get() - 
.getShardsResultPerIndex().get("test"); - // just to make sure the test actually tests the right thing - int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test") - .getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1); - assertThat(shardsResult.size(), equalTo(numShards)); - assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards")); - } - - private void indexDoc(Engine engine, String id) throws IOException { - final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); - final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc, - ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), - -1L, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); - assertThat(indexResult.getFailure(), nullValue()); - engine.syncTranslog(); - } - - public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get() - ); - ensureGreen(); - final Index index = clusterService().state().metaData().index("test").getIndex(); - final ShardId shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - indexDoc("test", Integer.toString(i)); - } - final List indexShards = internalCluster().nodesInclude("test").stream() - .map(node -> internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId)) - .collect(Collectors.toList()); - // Index extra documents to one replica - synced-flush should fail on that replica. - final IndexShard outOfSyncReplica = randomValueOtherThanMany(s -> s.routingEntry().primary(), () -> randomFrom(indexShards)); - final int extraDocs = between(1, 10); - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); - } - final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); - assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( - "ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]")); - // Index extra documents to all shards - synced-flush should be ok. 
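For context on the assertions being deleted in this FlushIT hunk: a synced flush stamped a sync id into each shard copy's Lucene commit user data under Engine.SYNC_COMMIT_ID, and the removed tests read that value back through two surfaces, the indices stats API and an IndexShard's own commit stats. A minimal, hedged sketch of cross-checking those two views, built only from calls that already appear in the removed code (the index name "test" and a populated shardId are assumptions for illustration, not a drop-in test):

    // Fragment for an ESIntegTestCase-style test; assumes a synced flush has completed on "test",
    // so every copy's commit carries the same sync id.
    IndexShard shard = internalCluster()
        .getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test")))
        .getShardOrNull(shardId);
    String syncIdFromShard = shard.commitStats().syncId();
    String syncIdFromStats = client().admin().indices().prepareStats("test").get().getIndex("test")
        .getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
    // Both read the same commit-level user data, so they should agree after a successful synced flush.
    assertThat(syncIdFromShard, equalTo(syncIdFromStats));
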
- for (IndexShard indexShard : indexShards) { - // Do reindex documents to the out of sync replica to avoid trigger merges - if (indexShard != outOfSyncReplica) { - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); - } - } - } - final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); - } - - public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get() - ); - ensureGreen(); - final Index index = clusterService().state().metaData().index("test").getIndex(); - final ShardId shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - indexDoc("test", Integer.toString(i)); - } - final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - // Do not renew synced-flush - final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); - // Shards were updated, renew synced flush. - final int moreDocs = between(1, 10); - for (int i = 0; i < moreDocs; i++) { - indexDoc("test", "more-" + i); - } - final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); - // Manually remove or change sync-id, renew synced flush. - IndexShard shard = internalCluster().getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test"))) - .getShardOrNull(shardId); - if (randomBoolean()) { - // Change the existing sync-id of a single shard. 
- shard.syncFlush(UUIDs.randomBase64UUID(random()), shard.commitStats().getRawCommitId()); - assertThat(shard.commitStats().syncId(), not(equalTo(thirdSeal.syncId()))); - } else { - // Flush will create a new commit without sync-id - shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); - assertThat(shard.commitStats().syncId(), nullValue()); - } - final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); - } - public void testFlushOnInactive() throws Exception { final String indexName = "flush_on_inactive"; List dataNodes = internalCluster().startDataOnlyNodes(2, Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java deleted file mode 100644 index d695e5e612de1..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.indices.flush; - -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; - -public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { - - public void testModificationPreventsFlushing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); - String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - - // pull another commit and make sure we can't sync-flush with the old one - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, 
syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testSingleShardSuccess() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(1, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); - assertTrue(response.success()); - } - - public void testSyncFailsIfOperationIsInFlight() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - // wait for the GCP sync spawned from the index request above to complete to avoid that request disturbing the check below - assertBusy(() -> { - assertEquals(0, shard.getLastSyncedGlobalCheckpoint()); - assertEquals(0, shard.getActiveOperationsCount()); - }); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - PlainActionFuture fut = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, ""); - try (Releasable operationLock = fut.get()) { - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertNotEquals(0, syncedFlushResult.totalShards()); - assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason()); - } - } - - public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { - createIndex("test", Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build()); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertNotNull(shard); - final ShardId shardId = shard.shardId(); - - final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), 
listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals(ShardNotFoundException.class, listener.error.getClass()); - assertEquals("no such shard", listener.error.getMessage()); - - assertAcked(client().admin().indices().prepareClose("test")); - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals("closed", listener.error.getMessage()); - - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("index not found", "_na_", 0), listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals("no such index [index not found]", listener.error.getMessage()); - } - - public void testFailAfterIntermediateCommit() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - if (randomBoolean()) { - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); - } - client().admin().indices().prepareFlush("test").setForce(true).get(); - String syncId = UUIDs.randomBase64UUID(); - final SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testFailWhenCommitIsMissing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = 
shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - preSyncedResponses.clear(); // wipe it... - String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - -} diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java deleted file mode 100644 index ffb494570a5b9..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.indices.flush; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.InternalTestCluster; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.test.ESTestCase.assertBusy; - -/** Utils for SyncedFlush */ -public class SyncedFlushUtil { - - private SyncedFlushUtil() { - - } - - /** - * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} - */ - public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception { - /* - * When the last indexing operation is completed, we will fire a global checkpoint sync. - * Since a global checkpoint sync request is a replication request, it will acquire an index - * shard permit on the primary when executing. 
If this happens at the same time while we are - * issuing the synced-flush, the synced-flush request will fail as it thinks there are - * in-flight operations. We can avoid such situation by continuing issuing another synced-flush - * if the synced-flush failed due to the ongoing operations on the primary. - */ - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - AtomicReference> listenerHolder = new AtomicReference<>(); - assertBusy(() -> { - LatchedListener listener = new LatchedListener<>(); - listenerHolder.set(listener); - service.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - if (listener.result != null && listener.result.failureReason() != null - && listener.result.failureReason().contains("ongoing operations on primary")) { - throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry - } - }); - if (listenerHolder.get().error != null) { - throw ExceptionsHelper.convertToElastic(listenerHolder.get().error); - } - return listenerHolder.get().result; - } - - public static final class LatchedListener implements ActionListener { - public volatile T result; - public volatile Exception error; - public final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(T result) { - this.result = result; - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - error = e; - latch.countDown(); - } - } - - /** - * Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)} - */ - public static Map sendPreSyncRequests(SyncedFlushService service, - List activeShards, - ClusterState state, - ShardId shardId) { - LatchedListener> listener = new LatchedListener<>(); - service.sendPreSyncRequests(activeShards, state, shardId, listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; - } -} diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java index ba77877f7f67d..5dbb34df24a54 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; @@ -29,16 +30,18 @@ import static org.elasticsearch.cluster.metadata.IndexGraveyard.SETTING_MAX_TOMBSTONES; import static org.elasticsearch.gateway.DanglingIndicesState.AUTO_IMPORT_DANGLING_INDICES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; @ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST) public class DanglingIndicesIT extends ESIntegTestCase { private static final String INDEX_NAME = "test-idx-1"; - private Settings buildSettings(boolean importDanglingIndices) { + private Settings buildSettings(boolean writeDanglingIndices, boolean importDanglingIndices) { return Settings.builder() // Don't keep any indices in the graveyard, so that when we delete 
an index, // it's definitely considered to be dangling. .put(SETTING_MAX_TOMBSTONES.getKey(), 0) + .put(IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), writeDanglingIndices) .put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), importDanglingIndices) .build(); } @@ -48,10 +51,21 @@ private Settings buildSettings(boolean importDanglingIndices) { * the cluster, so long as the recovery setting is enabled. */ public void testDanglingIndicesAreRecoveredWhenSettingIsEnabled() throws Exception { - final Settings settings = buildSettings(true); + final Settings settings = buildSettings(true, true); internalCluster().startNodes(3, settings); createIndex(INDEX_NAME, Settings.builder().put("number_of_replicas", 2).build()); + ensureGreen(INDEX_NAME); + assertBusy(() -> internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten()))); + + boolean refreshIntervalChanged = randomBoolean(); + if (refreshIntervalChanged) { + client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", "42s").build()).get(); + assertBusy(() -> internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten()))); + } if (randomBoolean()) { client().admin().indices().prepareClose(INDEX_NAME).get(); @@ -63,12 +77,17 @@ public void testDanglingIndicesAreRecoveredWhenSettingIsEnabled() throws Excepti @Override public Settings onNodeStopped(String nodeName) throws Exception { + ensureClusterSizeConsistency(); assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); return super.onNodeStopped(nodeName); } }); assertBusy(() -> assertTrue("Expected dangling index " + INDEX_NAME + " to be recovered", indexExists(INDEX_NAME))); + if (refreshIntervalChanged) { + assertThat(client().admin().indices().prepareGetSettings(INDEX_NAME).get().getSetting(INDEX_NAME, "index.refresh_interval"), + equalTo("42s")); + } ensureGreen(INDEX_NAME); } @@ -77,15 +96,49 @@ public Settings onNodeStopped(String nodeName) throws Exception { * the cluster when the recovery setting is disabled. */ public void testDanglingIndicesAreNotRecoveredWhenSettingIsDisabled() throws Exception { - internalCluster().startNodes(3, buildSettings(false)); + internalCluster().startNodes(3, buildSettings(true, false)); + + createIndex(INDEX_NAME, Settings.builder().put("number_of_replicas", 2).build()); + ensureGreen(INDEX_NAME); + assertBusy(() -> internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten()))); + + // Restart node, deleting the index in its absence, so that there is a dangling index to recover + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ensureClusterSizeConsistency(); + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + return super.onNodeStopped(nodeName); + } + }); + + // Since index recovery is async, we can't prove index recovery will never occur, just that it doesn't occur within some reasonable + // amount of time + assertFalse( + "Did not expect dangling index " + INDEX_NAME + " to be recovered", + waitUntil(() -> indexExists(INDEX_NAME), 1, TimeUnit.SECONDS) + ); + } + + /** + * Check that when dangling indices are not written, then they cannot be recovered into the cluster. 
+ */ + public void testDanglingIndicesAreNotRecoveredWhenNotWritten() throws Exception { + internalCluster().startNodes(3, buildSettings(false, true)); createIndex(INDEX_NAME, Settings.builder().put("number_of_replicas", 2).build()); + ensureGreen(INDEX_NAME); + internalCluster().getInstances(IndicesService.class).forEach( + indicesService -> assertTrue(indicesService.allPendingDanglingIndicesWritten())); // Restart node, deleting the index in its absence, so that there is a dangling index to recover internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) throws Exception { + ensureClusterSizeConsistency(); assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); return super.onNodeStopped(nodeName); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 414df8d648c51..a8b3d82a02989 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -62,6 +62,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -78,7 +79,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.flush.SyncedFlushUtil; import org.elasticsearch.indices.recovery.RecoveryState.Stage; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; @@ -109,7 +109,6 @@ import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; @@ -118,7 +117,6 @@ import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import java.util.stream.StreamSupport; import static java.util.Collections.singletonMap; @@ -329,8 +327,19 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio final String nodeA = internalCluster().startNode(); logger.info("--> create index on node: {}", nodeA); - createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT) - .getShards()[0].getStats().getStore().size(); + createIndex(INDEX_NAME, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms").build()); + + int numDocs = randomIntBetween(10, 200); + final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex(INDEX_NAME). 
+ setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + } + indexRandom(randomBoolean(), docs); logger.info("--> start node B"); // force a shard recovery from nodeA to nodeB @@ -346,8 +355,7 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio logger.info("--> start node C"); final String nodeC = internalCluster().startNode(); - // do sync flush to gen sync id - assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0)); + ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME); // hold peer recovery on phase 2 after nodeB down CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); @@ -1077,73 +1085,6 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { ensureGreen(indexName); } - public void testRecoveryFlushReplica() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(3); - String indexName = "test-index"; - createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); - int numDocs = randomIntBetween(0, 10); - indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put("index.number_of_replicas", 1))); - ensureGreen(indexName); - ShardId shardId = null; - for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { - shardId = shardStats.getShardRouting().shardId(); - if (shardStats.getShardRouting().primary() == false) { - assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs)); - SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( - shardStats.getCommitStats().getUserData().entrySet()); - assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); - assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo())); - } - } - SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0))); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put("index.number_of_replicas", 2))); - ensureGreen(indexName); - // Recovery should keep syncId if no indexing activity on the primary after synced-flush. 
- Set syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) - .map(shardStats -> shardStats.getCommitStats().syncId()) - .collect(Collectors.toSet()); - assertThat(syncIds, hasSize(1)); - } - - public void testRecoveryUsingSyncedFlushWithoutRetentionLease() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") // do not reallocate the lost shard - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "100ms") // expire leases quickly - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") // sync frequently - .build()); - int numDocs = randomIntBetween(0, 10); - indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); - ensureGreen(indexName); - - final ShardId shardId = new ShardId(resolveIndex(indexName), 0); - assertThat(SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId).successfulShards(), equalTo(2)); - - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - final ShardRouting shardToResync = randomFrom(clusterState.routingTable().shardRoutingTable(shardId).activeShards()); - internalCluster().restartNode(clusterState.nodes().get(shardToResync.currentNodeId()).getName(), - new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - assertBusy(() -> assertFalse(client().admin().indices().prepareStats(indexName).get() - .getShards()[0].getRetentionLeaseStats().retentionLeases().contains( - ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync)))); - return super.onNodeStopped(nodeName); - } - }); - - ensureGreen(indexName); - } - public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); List nodes = randomSubsetOf(2, StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false) diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 1000a3df4c8d4..ff5a3b3097a61 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -404,11 +404,8 @@ public void testRecoverExistingReplica() throws Exception { indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); ensureGreen(indexName); - if (randomBoolean()) { - client().admin().indices().prepareFlush(indexName).get(); - } else { - client().admin().indices().prepareSyncedFlush(indexName).get(); - } + client().admin().indices().prepareFlush(indexName).get(); + // index more documents while one shard copy is offline internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { @Override diff --git a/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java index 377679afc43f7..75f1d4d7afb6b 100644 --- 
a/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java @@ -285,11 +285,12 @@ public void testBreakOnFailure() throws Exception { public void testFailureProcessorIsInvokedOnFailure() { TestProcessor onFailureProcessor = new TestProcessor(null, "on_failure", ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); - assertThat(ingestMetadata.entrySet(), hasSize(4)); + assertThat(ingestMetadata.entrySet(), hasSize(5)); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo("failure!")); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("test-processor")); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), nullValue()); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PIPELINE_FIELD), equalTo("2")); + assertThat(ingestMetadata.get("pipeline"), equalTo("1")); }); Pipeline pipeline2 = new Pipeline("2", null, null, new CompoundProcessor(new TestProcessor(new RuntimeException("failure!")))); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java index aebcc28e77d5e..74dba097db792 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java @@ -20,17 +20,22 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.TemplateScript; import org.elasticsearch.test.ESTestCase; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -205,6 +210,58 @@ pipeline3Id, null, null, new CompoundProcessor( assertThat(pipeline3Stats.getIngestFailedCount(), equalTo(1L)); } + public void testIngestPipelineMetadata() { + IngestService ingestService = createIngestService(); + + final int numPipelines = 16; + Pipeline firstPipeline = null; + for (int i = 0; i < numPipelines; i++) { + String pipelineId = Integer.toString(i); + List processors = new ArrayList<>(); + processors.add(new AbstractProcessor(null) { + @Override + public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { + ingestDocument.appendFieldValue("pipelines", ingestDocument.getIngestMetadata().get("pipeline")); + return ingestDocument; + } + + @Override + public String getType() { + return null; + } + + }); + if (i < (numPipelines - 1)) { + TemplateScript.Factory pipelineName = new TestTemplateService.MockTemplateScript.Factory(Integer.toString(i + 1)); + processors.add(new PipelineProcessor(null, pipelineName, ingestService)); + } + + + Pipeline pipeline = new Pipeline(pipelineId, null, null, new CompoundProcessor(false, processors, List.of())); + when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); + if (firstPipeline == null) { + firstPipeline = pipeline; + } + } + + IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), 
new HashMap<>()); + IngestDocument[] docHolder = new IngestDocument[1]; + Exception[] errorHolder = new Exception[1]; + testIngestDocument.executePipeline(firstPipeline, (doc, e) -> { + docHolder[0] = doc; + errorHolder[0] = e; + }); + assertThat(docHolder[0], notNullValue()); + assertThat(errorHolder[0], nullValue()); + + IngestDocument ingestDocument = docHolder[0]; + List pipelines = ingestDocument.getFieldValue("pipelines", List.class); + assertThat(pipelines.size(), equalTo(numPipelines)); + for (int i = 0; i < numPipelines; i++) { + assertThat(pipelines.get(i), equalTo(Integer.toString(i))); + } + } + static IngestService createIngestService() { IngestService ingestService = mock(IngestService.class); ScriptService scriptService = mock(ScriptService.class); diff --git a/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java index c66d4742b991b..ef4613ce2ffd1 100644 --- a/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java @@ -220,6 +220,7 @@ pipelineId, null, null, new CompoundProcessor( trackingProcessor.execute(ingestDocument, (result, e) -> {}); SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument); + expectedResult.getIngestDocument().getIngestMetadata().put("pipeline", pipelineId); verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId); assertThat(resultList.size(), equalTo(3)); @@ -287,6 +288,7 @@ pipelineId2, null, null, new CompoundProcessor( SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument); + expectedResult.getIngestDocument().getIngestMetadata().put("pipeline", pipelineId1); verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId1); verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId2); @@ -355,6 +357,7 @@ pipelineId2, null, null, new CompoundProcessor( SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument); + expectedResult.getIngestDocument().getIngestMetadata().put("pipeline", pipelineId1); verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId1); verify(ingestService, Mockito.never()).getPipeline(pipelineId2); @@ -406,6 +409,7 @@ pipelineId, null, null, new CompoundProcessor( trackingProcessor.execute(ingestDocument, (result, e) -> {}); SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument); + expectedResult.getIngestDocument().getIngestMetadata().put("pipeline", pipelineId); verify(ingestService, Mockito.atLeast(2)).getPipeline(pipelineId); assertThat(resultList.size(), equalTo(4)); @@ -482,6 +486,7 @@ pipelineId, null, null, new CompoundProcessor( trackingProcessor.execute(ingestDocument, (result, e) -> {}); SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument); + expectedResult.getIngestDocument().getIngestMetadata().put("pipeline", pipelineId); verify(ingestService, Mockito.atLeast(2)).getPipeline(pipelineId); assertThat(resultList.size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 12adfb49120a7..0d15856745793 100644 --- 
a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContent; @@ -105,7 +106,8 @@ public void testAddSnapshots() { builder.put(indexId, 0, "2"); } RepositoryData newRepoData = repositoryData.addSnapshot(newSnapshot, - randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), builder.build()); + randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), + randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), builder.build()); // verify that the new repository data has the new snapshot and its indices assertTrue(newRepoData.getSnapshotIds().contains(newSnapshot)); for (IndexId indexId : indices) { @@ -122,17 +124,19 @@ public void testInitIndices() { final int numSnapshots = randomIntBetween(1, 30); final Map snapshotIds = new HashMap<>(numSnapshots); final Map snapshotStates = new HashMap<>(numSnapshots); + final Map snapshotVersions = new HashMap<>(numSnapshots); for (int i = 0; i < numSnapshots; i++) { final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); snapshotIds.put(snapshotId.getUUID(), snapshotId); snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values())); + snapshotVersions.put(snapshotId.getUUID(), randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); } RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds, - Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY); + Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY); // test that initializing indices works Map> indices = randomIndices(snapshotIds); RepositoryData newRepoData = - new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices, ShardGenerations.EMPTY); + new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, snapshotVersions, indices, ShardGenerations.EMPTY); List expected = new ArrayList<>(repositoryData.getSnapshotIds()); Collections.sort(expected); List actual = new ArrayList<>(newRepoData.getSnapshotIds()); @@ -168,7 +172,8 @@ public void testResolveIndexId() { public void testGetSnapshotState() { final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); final SnapshotState state = randomFrom(SnapshotState.values()); - final RepositoryData repositoryData = RepositoryData.EMPTY.addSnapshot(snapshotId, state, ShardGenerations.EMPTY); + final RepositoryData repositoryData = RepositoryData.EMPTY.addSnapshot(snapshotId, state, + randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), ShardGenerations.EMPTY); assertEquals(state, repositoryData.getSnapshotState(snapshotId)); assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()))); } @@ -187,9 +192,11 @@ public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { Map snapshotIds = new HashMap<>(); Map snapshotStates = new HashMap<>(); + Map snapshotVersions = new HashMap<>(); for (SnapshotId snapshotId : parsedRepositoryData.getSnapshotIds()) { snapshotIds.put(snapshotId.getUUID(), 
snapshotId); snapshotStates.put(snapshotId.getUUID(), parsedRepositoryData.getSnapshotState(snapshotId)); + snapshotVersions.put(snapshotId.getUUID(), parsedRepositoryData.getVersion(snapshotId)); } final IndexId corruptedIndexId = randomFrom(parsedRepositoryData.getIndices().values()); @@ -211,7 +218,7 @@ public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { assertNotNull(corruptedIndexId); RepositoryData corruptedRepositoryData = new RepositoryData(parsedRepositoryData.getGenId(), snapshotIds, snapshotStates, - indexSnapshots, shardGenBuilder.build()); + snapshotVersions, indexSnapshots, shardGenBuilder.build()); final XContentBuilder corruptedBuilder = XContentBuilder.builder(xContent); corruptedRepositoryData.snapshotsToXContent(corruptedBuilder, true); @@ -280,7 +287,8 @@ public static RepositoryData generateRandomRepoData() { builder.put(someIndex, j, uuid); } } - repositoryData = repositoryData.addSnapshot(snapshotId, randomFrom(SnapshotState.values()), builder.build()); + repositoryData = repositoryData.addSnapshot(snapshotId, randomFrom(SnapshotState.values()), + randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), builder.build()); } return repositoryData; } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 13102182cd7b0..3f0e80a93d263 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.blobstore; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -259,7 +260,7 @@ private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boo builder.put(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()), 0, "1"); } repoData = repoData.addSnapshot(snapshotId, - randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), builder.build()); + randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), Version.CURRENT, builder.build()); } return repoData; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java index e414b86403a09..9909495c230f0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java @@ -19,9 +19,22 @@ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.Version; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoBoundingBoxTests; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import 
org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.elasticsearch.test.VersionUtils; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; public class GeoHashGridTests extends BaseAggregationTestCase { @@ -39,7 +52,25 @@ protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { if (randomBoolean()) { factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); } + if (randomBoolean()) { + factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + } return factory; } + public void testSerializationPreBounds() throws Exception { + Version noBoundsSupportVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_0); + GeoHashGridAggregationBuilder builder = createTestAggregatorBuilder(); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_7_6_0); + builder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), + new NamedWriteableRegistry(Collections.emptyList()))) { + in.setVersion(noBoundsSupportVersion); + GeoHashGridAggregationBuilder readBuilder = new GeoHashGridAggregationBuilder(in); + assertThat(readBuilder.geoBoundingBox(), equalTo(new GeoBoundingBox( + new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN)))); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java new file mode 100644 index 0000000000000..dc2a72f387f11 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket; + +import org.elasticsearch.Version; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoBoundingBoxTests; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.elasticsearch.test.VersionUtils; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class GeoTileGridTests extends BaseAggregationTestCase { + + @Override + protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { + String name = randomAlphaOfLengthBetween(3, 20); + GeoTileGridAggregationBuilder factory = new GeoTileGridAggregationBuilder(name); + if (randomBoolean()) { + factory.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); + } + if (randomBoolean()) { + factory.size(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + } + return factory; + } + + public void testSerializationPreBounds() throws Exception { + Version noBoundsSupportVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_0); + GeoTileGridAggregationBuilder builder = createTestAggregatorBuilder(); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_7_6_0); + builder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), + new NamedWriteableRegistry(Collections.emptyList()))) { + in.setVersion(noBoundsSupportVersion); + GeoTileGridAggregationBuilder readBuilder = new GeoTileGridAggregationBuilder(in); + assertThat(readBuilder.geoBoundingBox(), equalTo(new GeoBoundingBox( + new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN)))); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 2c7d1de83f7b5..8b8864d3ecfdd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -22,10 +22,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -65,7 +62,6 @@ import 
java.util.concurrent.ExecutionException; import java.util.function.Function; -import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; @@ -88,92 +84,14 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(CustomSignificanceHeuristicPlugin.class); + return Arrays.asList(TestScriptPlugin.class); } public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } - public void testPlugin() throws Exception { - String type = randomBoolean() ? "text" : "long"; - String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; - SharedSignificantTermsTestMethods.index01Docs(type, settings, this); - SearchRequestBuilder request; - if ("text".equals(type) && randomBoolean()) { - // Use significant_text on text fields but occasionally run with alternative of - // significant_terms on legacy fieldData=true too. - request = client().prepareSearch(INDEX_NAME) - .addAggregation( - terms("class") - .field(CLASS_FIELD) - .subAggregation((significantText("sig_terms", TEXT_FIELD)) - .significanceHeuristic(new SimpleHeuristic()) - .minDocCount(1) - ) - ); - }else - { - request = client().prepareSearch(INDEX_NAME) - .addAggregation( - terms("class") - .field(CLASS_FIELD) - .subAggregation((significantTerms("sig_terms")) - .field(TEXT_FIELD) - .significanceHeuristic(new SimpleHeuristic()) - .minDocCount(1) - ) - ); - } - - SearchResponse response = request.get(); - assertSearchResponse(response); - StringTerms classes = response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - assertTrue(aggs.containsKey("sig_terms")); - SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); - assertThat(agg.getBuckets().size(), equalTo(2)); - Iterator bucketIterator = agg.iterator(); - SignificantTerms.Bucket sigBucket = bucketIterator.next(); - String term = sigBucket.getKeyAsString(); - String classTerm = classBucket.getKeyAsString(); - assertTrue(term.equals(classTerm)); - assertThat(sigBucket.getSignificanceScore(), closeTo(2.0, 1.e-8)); - sigBucket = bucketIterator.next(); - assertThat(sigBucket.getSignificanceScore(), closeTo(1.0, 1.e-8)); - } - - // we run the same test again but this time we do not call assertSearchResponse() before the assertions - // the reason is that this would trigger toXContent and we would like to check that this has no potential side effects - - response = request.get(); - - classes = (StringTerms) response.getAggregations().get("class"); - assertThat(classes.getBuckets().size(), equalTo(2)); - for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - assertTrue(aggs.containsKey("sig_terms")); - SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); - assertThat(agg.getBuckets().size(), equalTo(2)); - Iterator bucketIterator = agg.iterator(); - SignificantTerms.Bucket sigBucket = bucketIterator.next(); - String term = sigBucket.getKeyAsString(); - String classTerm = classBucket.getKeyAsString(); - 
assertTrue(term.equals(classTerm)); - assertThat(sigBucket.getSignificanceScore(), closeTo(2.0, 1.e-8)); - sigBucket = bucketIterator.next(); - assertThat(sigBucket.getSignificanceScore(), closeTo(1.0, 1.e-8)); - } - } - - public static class CustomSignificanceHeuristicPlugin extends MockScriptPlugin implements SearchPlugin { - @Override - public List> getSignificanceHeuristics() { - return singletonList(new SignificanceHeuristicSpec<>(SimpleHeuristic.NAME, SimpleHeuristic::new, SimpleHeuristic.PARSER)); - } - + public static class TestScriptPlugin extends MockScriptPlugin implements SearchPlugin { @Override public Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); @@ -203,65 +121,6 @@ private static long longValue(Object value) { } } - public static class SimpleHeuristic extends SignificanceHeuristic { - public static final String NAME = "simple"; - public static final ObjectParser PARSER = new ObjectParser<>(NAME, SimpleHeuristic::new); - - public SimpleHeuristic() { - } - - /** - * Read from a stream. - */ - public SimpleHeuristic(StreamInput in) throws IOException { - // Nothing to read - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - // Nothing to write - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME).endObject(); - return builder; - } - - @Override - public int hashCode() { - return 1; - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - return true; - } - - /** - * @param subsetFreq The frequency of the term in the selected sample - * @param subsetSize The size of the selected sample (typically number of docs) - * @param supersetFreq The frequency of the term in the superset from which the sample was taken - * @param supersetSize The size of the superset from which the sample was taken (typically number of docs) - * @return a "significance" score - */ - @Override - public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { - return subsetFreq / subsetSize > supersetFreq / supersetSize ? 2.0 : 1.0; - } - } - public void testXContentResponse() throws Exception { String type = randomBoolean() ?
"text" : "long"; String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index c78a55b253297..c36ff50b7a36d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.common.geo.GeoBoundingBoxTests; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.sort.SortOrder; @@ -54,7 +56,10 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { private GeoTileGridValuesSourceBuilder randomGeoTileGridValuesSourceBuilder() { GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); if (randomBoolean()) { - geoTile.precision(randomIntBetween(1, 12)); + geoTile.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); + } + if (randomBoolean()) { + geoTile.geoBoundingBox(GeoBoundingBoxTests.randomBBox()); } return geoTile; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java index 6f9e0f697da25..17b63e0ee6603 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java @@ -19,7 +19,21 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.Version; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoBoundingBoxTests; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; public class GeoTileGridValuesSourceBuilderTests extends ESTestCase { @@ -28,4 +42,22 @@ public void testSetFormat() { expectThrows(IllegalArgumentException.class, () -> builder.format("format")); } + public void testBWCBounds() throws IOException {
+ Version noBoundsSupportVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_5_0); + GeoTileGridValuesSourceBuilder builder = new GeoTileGridValuesSourceBuilder("name"); + if (randomBoolean()) { + builder.geoBoundingBox(GeoBoundingBoxTests.randomBBox()); + } + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_7_6_0); + builder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), + new NamedWriteableRegistry(Collections.emptyList()))) { + in.setVersion(noBoundsSupportVersion); + GeoTileGridValuesSourceBuilder readBuilder = new GeoTileGridValuesSourceBuilder(in); + assertThat(readBuilder.geoBoundingBox(), equalTo(new GeoBoundingBox( + new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN)))); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 4edd694a3ac03..67ac4d9163dcb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -47,6 +47,9 @@ import java.util.stream.IntStream; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.toList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -238,8 +241,7 @@ public void testReduceSame() throws IOException { for (int i = 0; i < numSame; i++) { toReduce.add(result); } - InternalComposite finalReduce = (InternalComposite) result.reduce(toReduce, - new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, true)); + InternalComposite finalReduce = (InternalComposite) result.reduce(toReduce, reduceContext()); assertThat(finalReduce.getBuckets().size(), equalTo(result.getBuckets().size())); Iterator expectedIt = result.getBuckets().iterator(); for (InternalComposite.InternalBucket bucket : finalReduce.getBuckets()) { @@ -249,6 +251,30 @@ public void testReduceSame() throws IOException { } } + /** + * Check that reducing with an unmapped index produces useful formats. 
+ */ + public void testReduceUnmapped() throws IOException { + var mapped = createTestInstance(randomAlphaOfLength(10), emptyList(), emptyMap(), InternalAggregations.EMPTY); + var rawFormats = formats.stream().map(f -> DocValueFormat.RAW).collect(toList()); + var unmapped = new InternalComposite(mapped.getName(), mapped.getSize(), sourceNames, + rawFormats, emptyList(), null, reverseMuls, true, emptyList(), emptyMap()); + List toReduce = Arrays.asList(unmapped, mapped); + Collections.shuffle(toReduce, random()); + InternalComposite finalReduce = (InternalComposite) unmapped.reduce(toReduce, reduceContext()); + assertThat(finalReduce.getBuckets().size(), equalTo(mapped.getBuckets().size())); + if (false == mapped.getBuckets().isEmpty()) { + assertThat(finalReduce.getFormats(), equalTo(mapped.getFormats())); + } + var expectedIt = mapped.getBuckets().iterator(); + for (var bucket : finalReduce.getBuckets()) { + InternalComposite.InternalBucket expectedBucket = expectedIt.next(); + assertThat(bucket.getRawKey(), equalTo(expectedBucket.getRawKey())); + assertThat(bucket.getDocCount(), equalTo(expectedBucket.getDocCount())); + assertThat(bucket.getFormats(), equalTo(expectedBucket.getFormats())); + } + } + public void testCompareCompositeKeyBiggerFieldName() { InternalComposite.ArrayMap key1 = createMap( Arrays.asList("field1", "field2"), @@ -381,4 +407,8 @@ private InternalComposite.ArrayMap createMap(List fields, Comparable[] v values ); } + + private InternalAggregation.ReduceContext reduceContext() { + return new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, true); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 047903bc86100..15d6f503d03f6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -28,6 +28,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoBoundingBoxTests; +import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.Aggregator; @@ -44,6 +47,8 @@ import java.util.Set; import java.util.function.Consumer; +import static org.hamcrest.Matchers.equalTo; + public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; @@ -64,18 +69,18 @@ public abstract class GeoGridAggregatorTestCase protected abstract GeoGridAggregationBuilder createBuilder(String name); public void testNoDocs() throws IOException { - testCase(new MatchAllDocsQuery(), FIELD_NAME, randomPrecision(), iw -> { - // Intentionally not writing any docs - }, geoGrid -> { + testCase(new MatchAllDocsQuery(), FIELD_NAME, randomPrecision(), null, geoGrid -> { assertEquals(0, geoGrid.getBuckets().size()); + }, iw -> { + // Intentionally not writing any docs }); } public void testFieldMissing() throws IOException { - testCase(new MatchAllDocsQuery(), "wrong_field", randomPrecision(), iw -> { - iw.addDocument(Collections.singleton(new 
LatLonDocValuesField(FIELD_NAME, 10D, 10D))); - }, geoGrid -> { + testCase(new MatchAllDocsQuery(), "wrong_field", randomPrecision(), null, geoGrid -> { assertEquals(0, geoGrid.getBuckets().size()); + }, iw -> { + iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); }); } @@ -83,7 +88,13 @@ public void testWithSeveralDocs() throws IOException { int precision = randomPrecision(); int numPoints = randomIntBetween(8, 128); Map expectedCountPerGeoHash = new HashMap<>(); - testCase(new MatchAllDocsQuery(), FIELD_NAME, precision, iw -> { + testCase(new MatchAllDocsQuery(), FIELD_NAME, precision, null, geoHashGrid -> { + assertEquals(expectedCountPerGeoHash.size(), geoHashGrid.getBuckets().size()); + for (GeoGrid.Bucket bucket : geoHashGrid.getBuckets()) { + assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); + } + assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); + }, iw -> { List points = new ArrayList<>(); Set distinctHashesPerDoc = new HashSet<>(); for (int pointId = 0; pointId < numPoints; pointId++) { @@ -112,17 +123,72 @@ public void testWithSeveralDocs() throws IOException { if (points.size() != 0) { iw.addDocument(points); } - }, geoHashGrid -> { - assertEquals(expectedCountPerGeoHash.size(), geoHashGrid.getBuckets().size()); - for (GeoGrid.Bucket bucket : geoHashGrid.getBuckets()) { - assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); + }); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/51103") + public void testBounds() throws IOException { + final int numDocs = randomIntBetween(64, 256); + final GeoGridAggregationBuilder builder = createBuilder("_name"); + + expectThrows(IllegalArgumentException.class, () -> builder.precision(-1)); + expectThrows(IllegalArgumentException.class, () -> builder.precision(30)); + + GeoBoundingBox bbox = GeoBoundingBoxTests.randomBBox(); + + int in = 0, out = 0; + List docs = new ArrayList<>(); + while (in + out < numDocs) { + if (bbox.left() > bbox.right()) { + if (randomBoolean()) { + double lonWithin = randomBoolean() ? 
+ randomDoubleBetween(bbox.left(), 180.0, true) + : randomDoubleBetween(-180.0, bbox.right(), true); + double latWithin = randomDoubleBetween(bbox.bottom(), bbox.top(), true); + in++; + docs.add(new LatLonDocValuesField(FIELD_NAME, latWithin, lonWithin)); + } else { + double lonOutside = randomDoubleBetween(bbox.left(), bbox.right(), true); + double latOutside = randomDoubleBetween(bbox.top(), -90, false); + out++; + docs.add(new LatLonDocValuesField(FIELD_NAME, latOutside, lonOutside)); + } + } else { + if (randomBoolean()) { + double lonWithin = randomDoubleBetween(bbox.left(), bbox.right(), true); + double latWithin = randomDoubleBetween(bbox.bottom(), bbox.top(), true); + in++; + docs.add(new LatLonDocValuesField(FIELD_NAME, latWithin, lonWithin)); + } else { + double lonOutside = GeoUtils.normalizeLon(randomDoubleBetween(bbox.right(), 180.001, false)); + double latOutside = GeoUtils.normalizeLat(randomDoubleBetween(bbox.top(), 90.001, false)); + out++; + docs.add(new LatLonDocValuesField(FIELD_NAME, latOutside, lonOutside)); + } + } + + } + + final long numDocsInBucket = in; + final int precision = randomPrecision(); + + testCase(new MatchAllDocsQuery(), FIELD_NAME, precision, bbox, geoGrid -> { + assertTrue(AggregationInspectionHelper.hasValue(geoGrid)); + long docCount = 0; + for (int i = 0; i < geoGrid.getBuckets().size(); i++) { + docCount += geoGrid.getBuckets().get(i).getDocCount(); + } + assertThat(docCount, equalTo(numDocsInBucket)); + }, iw -> { + for (LatLonDocValuesField docField : docs) { + iw.addDocument(Collections.singletonList(docField)); } - assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } - private void testCase(Query query, String field, int precision, CheckedConsumer buildIndex, - Consumer> verify) throws IOException { + private void testCase(Query query, String field, int precision, GeoBoundingBox geoBoundingBox, + Consumer> verify, + CheckedConsumer buildIndex) throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); @@ -133,6 +199,11 @@ private void testCase(Query query, String field, int precision, CheckedConsumer< GeoGridAggregationBuilder aggregationBuilder = createBuilder("_name").field(field); aggregationBuilder.precision(precision); + if (geoBoundingBox != null) { + aggregationBuilder.setGeoBoundingBox(geoBoundingBox); + assertThat(aggregationBuilder.geoBoundingBox(), equalTo(geoBoundingBox)); + } + MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); fieldType.setHasDocValues(true); fieldType.setName(FIELD_NAME); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index 824a069203856..5075ed9f02aa2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.containsString; @@ -113,4 +115,19 @@ public void 
testParseErrorOnPrecisionOutOfRange() throws Exception { assertEquals("Invalid geohash aggregation precision of 13. Must be between 1 and 12.", ex.getCause().getMessage()); } } + + public void testParseValidBounds() throws Exception { + Rectangle bbox = GeometryTestUtils.randomRectangle(); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\": 5, \"size\": 500, \"shard_size\": 550," + "\"bounds\": { " + + "\"top\": " + bbox.getMaxY() + "," + + "\"bottom\": " + bbox.getMinY() + "," + + "\"left\": " + bbox.getMinX() + "," + + "\"right\": " + bbox.getMaxX() + "}" + + "}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java index 6544344543e34..85b2306403230 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java @@ -46,5 +46,4 @@ public void testPrecision() { builder.precision(precision); assertEquals(precision, builder.precision()); } - } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java index 6f15263a53b11..aa5253564fb49 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.containsString; @@ -70,4 +72,19 @@ public void testParseErrorOnPrecisionOutOfRange() throws Exception { assertEquals("Invalid geotile_grid precision of 30. 
Must be between 0 and 29.", ex.getCause().getMessage()); } } + + public void testParseValidBounds() throws Exception { + Rectangle bbox = GeometryTestUtils.randomRectangle(); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\": 5, \"size\": 500, \"shard_size\": 550," + "\"bounds\": { " + + "\"top\": " + bbox.getMaxY() + "," + + "\"bottom\": " + bbox.getMinY() + "," + + "\"left\": " + bbox.getMinX() + "," + + "\"right\": " + bbox.getMaxX() + "}" + + "}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java index ddfd2c8c82c31..9a2d5a411d4ad 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java @@ -164,7 +164,7 @@ static AggregatorFactory getRandomSequentiallyOrderedParentAgg() throws IOExcept new AggregatorFactories.Builder(), Collections.emptyMap()); break; case 1: - factory = new DateHistogramAggregatorFactory("name", mock(ValuesSourceConfig.class), 0L, + factory = new DateHistogramAggregatorFactory("name", mock(ValuesSourceConfig.class), mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), mock(ExtendedBounds.class), mock(QueryShardContext.class), mock(AggregatorFactory.class), new AggregatorFactories.Builder(), Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 36c0d7ea12375..9a542fd762ad9 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -163,7 +163,7 @@ public void testUnknownArrayNameExpection() throws IOException { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : [ \"field1\" 1 \"field2\" ]\n" + "}\n"); - assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage()); + assertEquals("[2:5] [highlight] unknown field [bad_fieldname]", e.getMessage()); } { @@ -176,7 +176,7 @@ public void testUnknownArrayNameExpection() throws IOException { "}\n"); assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]")); assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]")); - assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage()); + assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname]", e.getCause().getCause().getMessage()); } } @@ -194,7 +194,7 @@ public void testUnknownFieldnameExpection() throws IOException { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : \"value\"\n" + "}\n"); - assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage()); + assertEquals("[2:5] 
[highlight] unknown field [bad_fieldname]", e.getMessage()); } { @@ -207,7 +207,7 @@ public void testUnknownFieldnameExpection() throws IOException { "}\n"); assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]")); assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]")); - assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage()); + assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname]", e.getCause().getCause().getMessage()); } } @@ -219,7 +219,7 @@ public void testUnknownObjectFieldnameExpection() throws IOException { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : { \"field\" : \"value\" }\n \n" + "}\n"); - assertEquals("[2:5] [highlight] unknown field [bad_fieldname], parser not found", e.getMessage()); + assertEquals("[2:5] [highlight] unknown field [bad_fieldname]", e.getMessage()); } { @@ -232,7 +232,7 @@ public void testUnknownObjectFieldnameExpection() throws IOException { "}\n"); assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]")); assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]")); - assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage()); + assertEquals("[4:9] [highlight_field] unknown field [bad_fieldname]", e.getCause().getCause().getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 302bffd668997..dfcc65afb5c65 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -254,7 +254,7 @@ public void testUnknownFieldsExpection() throws IOException { "}\n"; try (XContentParser parser = createParser(rescoreElement)) { XContentParseException e = expectThrows(XContentParseException.class, () -> RescorerBuilder.parseFromXContent(parser)); - assertEquals("[3:17] [query] unknown field [bad_fieldname], parser not found", e.getMessage()); + assertEquals("[3:17] [query] unknown field [bad_fieldname]", e.getMessage()); } rescoreElement = "{\n" + diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java index 750ec4f34dfa5..19351275f352c 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java @@ -316,7 +316,7 @@ public void testUnknownOptionFails() throws IOException { parser.nextToken(); XContentParseException e = expectThrows(XContentParseException.class, () -> FieldSortBuilder.fromXContent(parser, "")); - assertEquals("[1:18] [field_sort] unknown field [reverse], parser not found", e.getMessage()); + assertEquals("[1:18] [field_sort] unknown field [reverse]", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java index 2384d697e38dc..dbf78f6a4117d 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java @@ -228,7 +228,7 @@ public void testParseBadFieldNameExceptions() throws IOException { parser.nextToken(); XContentParseException e = expectThrows(XContentParseException.class, () -> ScriptSortBuilder.fromXContent(parser, null)); - assertEquals("[1:15] [_script] unknown field [bad_field], parser not found", e.getMessage()); + assertEquals("[1:15] [_script] unknown field [bad_field]", e.getMessage()); } } @@ -241,7 +241,7 @@ public void testParseBadFieldNameExceptionsOnStartObject() throws IOException { parser.nextToken(); XContentParseException e = expectThrows(XContentParseException.class, () -> ScriptSortBuilder.fromXContent(parser, null)); - assertEquals("[1:15] [_script] unknown field [bad_field], parser not found", e.getMessage()); + assertEquals("[1:15] [_script] unknown field [bad_field]", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 5bff24b934837..6b2d5b520f245 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -172,7 +172,7 @@ public void testIllegalXContent() throws IOException { // test unknown field directGenerator = "{ \"unknown_param\" : \"f1\" }"; assertIllegalXContent(directGenerator, IllegalArgumentException.class, - "[direct_generator] unknown field [unknown_param], parser not found"); + "[direct_generator] unknown field [unknown_param]"); // test bad value for field (e.g. size expects an int) directGenerator = "{ \"size\" : \"xxl\" }"; diff --git a/server/src/test/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 9abb107521042..4f26f31182e8d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.snapshots; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -27,18 +28,25 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Collections; import java.util.Locale; +import java.util.function.Function; +import java.util.stream.Collectors; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -263,11 +271,24 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt); Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); + logger.info("--> strip version information from index-N blob"); + final RepositoryData withoutVersions = new RepositoryData(repositoryData.getGenId(), + repositoryData.getSnapshotIds().stream().collect(Collectors.toMap( + SnapshotId::getUUID, Function.identity())), + repositoryData.getSnapshotIds().stream().collect(Collectors.toMap( + SnapshotId::getUUID, repositoryData::getSnapshotState)), + Collections.emptyMap(), Collections.emptyMap(), ShardGenerations.EMPTY); + + Files.write(repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()), + BytesReference.toBytes(BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), + true))), StandardOpenOption.TRUNCATE_EXISTING); + logger.info("--> verify that repo is assumed in old metadata format"); final SnapshotsService snapshotsService = internalCluster().getCurrentMasterNodeInstance(SnapshotsService.class); final ThreadPool threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class); assertThat(PlainActionFuture.get(f -> threadPool.generic().execute( - ActionRunnable.supply(f, () -> snapshotsService.hasOldVersionSnapshots(repoName, repositoryData, null)))), is(true)); + ActionRunnable.supply(f, () -> snapshotsService.hasOldVersionSnapshots(repoName, getRepositoryData(repository), null)))), + is(true)); logger.info("--> verify that snapshot with missing root level metadata can be deleted"); assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get()); @@ -276,6 +297,50 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { assertThat(PlainActionFuture.get(f -> threadPool.generic().execute( ActionRunnable.supply(f, () -> snapshotsService.hasOldVersionSnapshots(repoName, getRepositoryData(repository), null)))), is(false)); + final RepositoryData finalRepositoryData = getRepositoryData(repository); + for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { + assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); + } + } + + public void testMountCorruptedRepositoryData() throws Exception { + disableRepoConsistencyCheck("This test intentionally corrupts the repository contents"); + Client client = client(); + + Path repo = randomRepoPath(); + final String repoName = "test-repo"; + logger.info("--> creating repository at {}", repo.toAbsolutePath()); + assertAcked(client.admin().cluster().preparePutRepository(repoName) + .setType("fs").setSettings(Settings.builder() + .put("location", repo) + .put("compress", false))); + + final String snapshot = "test-snap"; + + logger.info("--> creating snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, snapshot) + .setWaitForCompletion(true).setIndices("test-idx-*").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + logger.info("--> corrupt index-N blob"); + final Repository repository = 
internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName); + final RepositoryData repositoryData = getRepositoryData(repository); + Files.write(repo.resolve("index-" + repositoryData.getGenId()), randomByteArrayOfLength(randomIntBetween(1, 100))); + + logger.info("--> verify loading repository data throws RepositoryException"); + expectThrows(RepositoryException.class, () -> getRepositoryData(repository)); + + logger.info("--> mount repository path in a new repository"); + final String otherRepoName = "other-repo"; + assertAcked(client.admin().cluster().preparePutRepository(otherRepoName) + .setType("fs").setSettings(Settings.builder() + .put("location", repo) + .put("compress", false))); + final Repository otherRepo = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(otherRepoName); + + logger.info("--> verify loading repository data from newly mounted repository throws RepositoryException"); + expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); } private void assertRepositoryBlocked(Client client, String repo, String existingSnapshot) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 9b8950c813b4f..d36a31a30f59b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -154,7 +154,6 @@ import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.recovery.PeerRecoverySourceService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; @@ -885,6 +884,7 @@ private Environment createEnvironment(String nodeName) { .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath()) .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)) + .put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), 1000) // o.w. 
some tests might block .build()); } @@ -1201,7 +1201,6 @@ public void onFailure(final Exception e) { new NodeMappingRefreshAction(transportService, metaDataMappingService), repositoriesService, mock(SearchService.class), - new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), new PeerRecoverySourceService(transportService, indicesService, recoverySettings), snapshotShardsService, new PrimaryReplicaSyncer( diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 0aa2c88142945..4654918d898f5 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -126,7 +126,7 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy(clusterAlias, localService, remoteConnectionManager, - numOfConnections, address1.toString(), alternatingResolver(address1, address2, useAddress1), false)) { + numOfConnections, address1.toString(), alternatingResolver(address1, address2, useAddress1), null)) { assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); @@ -206,7 +206,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy(clusterAlias, localService, remoteConnectionManager, - numOfConnections, address1.toString(), alternatingResolver(address1, address2, useAddress1), false)) { + numOfConnections, address1.toString(), alternatingResolver(address1, address2, useAddress1), null)) { assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); @@ -255,7 +255,7 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception int numOfConnections = randomIntBetween(4, 8); try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy(clusterAlias, localService, remoteConnectionManager, - numOfConnections, address.toString(), addressSupplier, false)) { + numOfConnections, address.toString(), addressSupplier, null)) { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); strategy.connect(connectFuture); connectFuture.actionGet(); @@ -357,13 +357,13 @@ public void testServerNameAttributes() { localService.start(); localService.acceptIncomingRequests(); - String serverName = "localhost:" + address1.getPort(); + String address = "localhost:" + address1.getPort(); ConnectionManager connectionManager = new ConnectionManager(profile, localService.transport); int numOfConnections = randomIntBetween(4, 8); try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); ProxyConnectionStrategy strategy = 
new ProxyConnectionStrategy(clusterAlias, localService, remoteConnectionManager, - numOfConnections, serverName, true)) { + numOfConnections, address, "localhost")) { assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 195e979c56dc5..8e061a06ec823 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -56,7 +56,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.test.transport.StubbableConnectionManager; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -84,7 +83,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; public class RemoteClusterConnectionTests extends ESTestCase { @@ -546,51 +544,24 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted public void testGetConnection() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + MockTransportService disconnectedTransport = startTransport("disconnected_node", knownNodes, Version.CURRENT)) { - DiscoveryNode connectedNode = seedTransport.getLocalDiscoNode(); - assertThat(connectedNode, notNullValue()); - knownNodes.add(connectedNode); + DiscoveryNode seedNode = seedTransport.getLocalNode(); + knownNodes.add(seedNode); - DiscoveryNode disconnectedNode = discoverableTransport.getLocalDiscoNode(); - assertThat(disconnectedNode, notNullValue()); - knownNodes.add(disconnectedNode); + DiscoveryNode disconnectedNode = disconnectedTransport.getLocalNode(); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - Transport.Connection seedConnection = new CloseableConnection() { - @Override - public DiscoveryNode getNode() { - return connectedNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { - // no-op - } - }; - - ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); - StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport); - - connectionManager.setDefaultNodeConnectedBehavior((cm, node) -> connectedNode.equals(node)); - - connectionManager.addGetConnectionBehavior(connectedNode.getAddress(), (cm, discoveryNode) -> seedConnection); - - connectionManager.addGetConnectionBehavior(disconnectedNode.getAddress(), (cm, discoveryNode) -> { - throw new NodeNotConnectedException(discoveryNode, ""); - }); - service.start(); service.acceptIncomingRequests(); String clusterAlias = 
"test-cluster"; - Settings settings = buildRandomSettings(clusterAlias, addresses(connectedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, connectionManager)) { + Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); + try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service)) { PlainActionFuture.get(fut -> connection.ensureConnected(ActionListener.map(fut, x -> null))); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected - Transport.Connection remoteConnection = connection.getConnection(connectedNode); - assertSame(seedConnection, remoteConnection); + Transport.Connection remoteConnection = connection.getConnection(seedNode); + assertEquals(seedNode, remoteConnection.getNode()); } for (int i = 0; i < 10; i++) { // we don't use the transport service connection manager so we will get a proxy connection for the local node @@ -599,7 +570,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, assertThat(remoteConnection.getNode(), equalTo(service.getLocalNode())); } for (int i = 0; i < 10; i++) { - //always a proxy connection as the target node is not connected + // always a proxy connection as the target node is not connected Transport.Connection remoteConnection = connection.getConnection(disconnectedNode); assertThat(remoteConnection, instanceOf(RemoteConnectionManager.ProxyConnection.class)); assertThat(remoteConnection.getNode(), sameInstance(disconnectedNode)); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index df9638d17835a..5f034dea82de0 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.coordination.LinearizabilityChecker.History; import org.elasticsearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -57,14 +56,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.ClusterStateUpdaters; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.disruption.DisruptableMockTransport; @@ -741,17 +741,17 @@ class MockPersistedState implements CoordinationState.PersistedState { try { if 
(oldState.nodeEnvironment != null) { nodeEnvironment = oldState.nodeEnvironment; - final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry()); final MetaData updatedMetaData = adaptGlobalMetaData.apply(oldState.getLastAcceptedState().metaData()); - if (updatedMetaData != oldState.getLastAcceptedState().metaData()) { - metaStateService.writeGlobalStateAndUpdateManifest("update global state", updatedMetaData); - } final long updatedTerm = adaptCurrentTerm.apply(oldState.getCurrentTerm()); - if (updatedTerm != oldState.getCurrentTerm()) { - final Manifest manifest = metaStateService.loadManifestOrEmpty(); - metaStateService.writeManifestAndCleanup("update term", - new Manifest(updatedTerm, manifest.getClusterStateVersion(), manifest.getGlobalGeneration(), - manifest.getIndexGenerations())); + if (updatedMetaData != oldState.getLastAcceptedState().metaData() || updatedTerm != oldState.getCurrentTerm()) { + try (PersistedClusterStateService.Writer writer = + new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + deterministicTaskQueue::getCurrentTimeMillis) + .createWriter()) { + writer.writeFullStateAndCommit(updatedTerm, + ClusterState.builder(oldState.getLastAcceptedState()).metaData(updatedMetaData).build()); + } } final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(newLocalNode); gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry()); @@ -854,6 +854,11 @@ public void setLastAcceptedState(ClusterState clusterState) { @Override public void close() { assertTrue(openPersistedStates.remove(this)); + try { + delegate.close(); + } catch (IOException e) { + throw new AssertionError("unexpected", e); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index b73a90b428485..e224c2bb40603 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -20,17 +20,23 @@ package org.elasticsearch.gateway; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -48,9 +54,10 @@ public MockGatewayMetaState(DiscoveryNode localNode) { } @Override - void upgradeMetaData(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, - MetaDataUpgrader metaDataUpgrader) { + MetaData upgradeMetaDataForNode(MetaData metaData, MetaDataIndexUpgradeService 
metaDataIndexUpgradeService, + MetaDataUpgrader metaDataUpgrader) { // MetaData upgrade is tested in GatewayMetaStateTests, we override this method to NOP to make mocking easier + return metaData; } @Override @@ -65,7 +72,14 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()) .thenReturn(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - start(settings, transportService, clusterService, new MetaStateService(nodeEnvironment, xContentRegistry), - null, null); + final MetaStateService metaStateService = mock(MetaStateService.class); + try { + when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), MetaData.builder().build())); + } catch (IOException e) { + throw new AssertionError(e); + } + start(settings, transportService, clusterService, metaStateService, + null, null, new PersistedClusterStateService(nodeEnvironment, xContentRegistry, BigArrays.NON_RECYCLING_INSTANCE, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index f0118d3c0b699..ecce14dfd460c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -88,7 +88,7 @@ public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind @Override public void getRepositoryData(ActionListener listener) { final IndexId indexId = new IndexId(indexName, "blah"); - listener.onResponse(new RepositoryData(EMPTY_REPO_GEN, Collections.emptyMap(), Collections.emptyMap(), + listener.onResponse(new RepositoryData(EMPTY_REPO_GEN, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.singletonMap(indexId, emptySet()), ShardGenerations.EMPTY)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 2fd1bf450f51c..ba06640b41342 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -189,7 +189,7 @@ public void testUnknownObjectException() throws IOException { if (expectedException == false) { throw new AssertionError("unexpected exception when parsing query:\n" + testQuery, e); } - assertThat(e.getMessage(), containsString("unknown field [newField], parser not found")); + assertThat(e.getMessage(), containsString("unknown field [newField]")); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 236779306baba..038efa4d4159f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -164,7 +164,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -1401,13 +1400,8 @@ private void postIndexAsyncActions(String[] indices, List inFlig client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (maybeFlush && rarely()) { - if (randomBoolean()) { - client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } else { - client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()), - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } + client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (rarely()) { client().admin().indices().prepareForceMerge(indices) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index fe9cc9f8449f6..7e325b61db9b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -69,6 +69,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.internal.io.IOUtils; @@ -78,7 +79,6 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; @@ -115,7 +115,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -470,6 +469,8 @@ private static Settings getRandomNodeSettings(long seed) { if (random.nextBoolean()) { builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), timeValueSeconds(RandomNumbers.randomIntBetween(random, 10, 30)).getStringRep()); + builder.put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), + RandomNumbers.randomIntBetween(random, 1, 10)); } // turning on the real memory circuit breaker leads to spurious test failures. As have no full control over heap usage, we @@ -1141,40 +1142,10 @@ public void beforeIndexDeletion() throws Exception { // and not all docs have been purged after the test) and inherit from // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures. 
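The comment just above describes an escape hatch rather than showing it. A hedged sketch of what such a test looks like, assuming the usual protected beforeIndexDeletion() hook that ESIntegTestCase exposes for this purpose (the class name is invented):

[source,java]
----
// A test that deliberately leaves documents behind can relax the cleanup assertions by
// overriding beforeIndexDeletion(), as the comment above suggests.
public class LeavesDocsBehindIT extends ESIntegTestCase {
    @Override
    protected void beforeIndexDeletion() throws Exception {
        // intentionally skip the pending-operations / translog / snapshot-commit checks
    }
}
----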
assertNoPendingIndexOperations(); - //check that shards that have same sync id also contain same number of documents - assertSameSyncIdSameDocs(); assertOpenTranslogReferences(); assertNoSnapshottedIndexCommit(); } - private void assertSameSyncIdSameDocs() { - Map docsOnShards = new HashMap<>(); - final Collection nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { - IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); - for (IndexService indexService : indexServices) { - for (IndexShard indexShard : indexService) { - try { - CommitStats commitStats = indexShard.commitStats(); - String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); - if (syncId != null) { - long liveDocsOnShard = commitStats.getNumDocs(); - if (docsOnShards.get(syncId) != null) { - assertThat("sync id is equal but number of docs does not match on node " - + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " - + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard)); - } else { - docsOnShards.put(syncId, liveDocsOnShard); - } - } - } catch (AlreadyClosedException e) { - // the engine is closed or if the shard is recovering - } - } - } - } - } - private void assertNoPendingIndexOperations() throws Exception { assertBusy(() -> { for (NodeAndClient nodeAndClient : nodes.values()) { @@ -1548,7 +1519,9 @@ private synchronized void startAndPublishNodesAndClients(List nod } catch (InterruptedException e) { throw new AssertionError("interrupted while starting nodes", e); } catch (ExecutionException e) { - throw new RuntimeException("failed to start nodes", e); + RuntimeException re = FutureUtils.rethrowExecutionException(e); + re.addSuppressed(new RuntimeException("failed to start nodes")); + throw re; } nodeAndClients.forEach(this::publishNode); diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java index 61b4349ba3fe3..d9fe62b8f478c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java @@ -32,6 +32,8 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -56,11 +58,23 @@ public class LongGCDisruption extends SingleNodeDisruption { private Set suspendedThreads; private Thread blockDetectionThread; + private final AtomicBoolean sawSlowSuspendBug = new AtomicBoolean(false); + public LongGCDisruption(Random random, String disruptedNode) { super(random); this.disruptedNode = disruptedNode; } + /** + * Checks if during disruption we ran into a known JVM issue that makes {@link Thread#suspend()} calls block for multiple seconds + * was observed. + * @see JDK-8218446 + * @return true if during thread suspending a call to {@link Thread#suspend()} took more than 3s + */ + public boolean sawSlowSuspendBug() { + return sawSlowSuspendBug.get(); + } + @Override public synchronized void startDisrupting() { if (suspendedThreads == null) { @@ -251,7 +265,11 @@ protected boolean suspendThreads(Set nodeThreads) { * assuming that it is safe. 
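The detection added just below is plain wall-clock timing around the suspend call. A standalone sketch of the same idea; the 3 second threshold, the flag name, and the JDK-8218446 reference come from this change, the wrapper method is illustrative:

[source,java]
----
// Minimal sketch of the slow-suspend detection: time Thread#suspend() and remember
// whether it ever took longer than 3 seconds (the known JDK-8218446 symptom).
final AtomicBoolean sawSlowSuspendBug = new AtomicBoolean(false);

void suspendAndRecord(Thread thread) {
    final long startTime = System.nanoTime();
    thread.suspend();   // deprecated, but what LongGCDisruption deliberately relies on
    if (System.nanoTime() - startTime > TimeUnit.SECONDS.toNanos(3L)) {
        sawSlowSuspendBug.set(true);
    }
}
----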
*/ boolean definitelySafe = true; + final long startTime = System.nanoTime(); thread.suspend(); + if (System.nanoTime() - startTime > TimeUnit.SECONDS.toNanos(3L)) { + sawSlowSuspendBug.set(true); + } // double check the thread is not in a shared resource like logging; if so, let it go and come back safe: for (StackTraceElement stackElement : thread.getStackTrace()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a1968332c734b..6ec594a0fdafa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1212,4 +1212,61 @@ protected static Version minimumNodeVersion() throws IOException { assertNotNull(minVersion); return minVersion; } + + protected void syncedFlush(String indexName) throws Exception { + final List deprecationMessages = List.of( + "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."); + final List transitionMessages = List.of( + "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version."); + final WarningsHandler warningsHandler; + if (minimumNodeVersion().onOrAfter(Version.V_8_0_0)) { + warningsHandler = warnings -> warnings.equals(transitionMessages) == false; + } else if (minimumNodeVersion().onOrAfter(Version.V_7_6_0)) { + warningsHandler = warnings -> warnings.equals(deprecationMessages) == false && warnings.equals(transitionMessages) == false; + } else if (nodeVersions.stream().anyMatch(n -> n.onOrAfter(Version.V_8_0_0))) { + warningsHandler = warnings -> warnings.isEmpty() == false && warnings.equals(transitionMessages) == false; + } else { + warningsHandler = warnings -> warnings.isEmpty() == false; + } + // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. + // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. + assertBusy(() -> { + try { + final Request request = new Request("POST", indexName + "/_flush/synced"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler)); + Response resp = client().performRequest(request); + if (nodeVersions.stream().allMatch(v -> v.before(Version.V_8_0_0))) { + Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("failed"), equalTo(0)); + } + } catch (ResponseException ex) { + if (ex.getResponse().getStatusLine().getStatusCode() == RestStatus.CONFLICT.getStatus() + && ex.getResponse().getWarnings().equals(transitionMessages)) { + logger.info("a normal flush was performed instead"); + } else { + throw new AssertionError(ex); // cause assert busy to retry + } + } + }); + // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId + ensureGlobalCheckpointSynced(indexName); + } + + @SuppressWarnings("unchecked") + private void ensureGlobalCheckpointSynced(String index) throws Exception { + assertBusy(() -> { + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + List> shardStats = (List>) XContentMapValues.extractValue("indices." 
+ index + ".shards.0", stats); + shardStats.stream() + .map(shard -> (Map) XContentMapValues.extractValue("seq_no", shard)) + .filter(Objects::nonNull) + .forEach(seqNoStat -> { + long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); + long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); + long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); + assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); + assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); + }); + }, 60, TimeUnit.SECONDS); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 109a725672098..770cae55bcfc7 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2357,7 +2357,7 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // we did a single round-trip to do the initial handshake assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); - assertEquals(138, transportStats.getRxSize().getBytes()); + assertEquals(25, transportStats.getRxSize().getBytes()); assertEquals(51, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, @@ -2367,7 +2367,7 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has ben send assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); - assertEquals(138, transportStats.getRxSize().getBytes()); + assertEquals(25, transportStats.getRxSize().getBytes()); assertEquals(111, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); @@ -2375,7 +2375,7 @@ public String executor() { stats = serviceC.transport.getStats(); // response has been received assertEquals(2, stats.getRxCount()); assertEquals(2, stats.getTxCount()); - assertEquals(163, stats.getRxSize().getBytes()); + assertEquals(50, stats.getRxSize().getBytes()); assertEquals(111, stats.getTxSize().getBytes()); } finally { serviceC.close(); @@ -2472,7 +2472,7 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); - assertEquals(138, transportStats.getRxSize().getBytes()); + assertEquals(25, transportStats.getRxSize().getBytes()); assertEquals(51, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, @@ -2482,7 +2482,7 @@ public String executor() { TransportStats transportStats = serviceC.transport.getStats(); // request has been sent assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); - assertEquals(138, transportStats.getRxSize().getBytes()); + assertEquals(25, transportStats.getRxSize().getBytes()); assertEquals(111, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); @@ -2497,7 +2497,7 @@ public String executor() { String failedMessage = "Unexpected read 
bytes size. The transport exception that was received=" + exception; // 49 bytes are the non-exception message bytes that have been received. It should include the initial // handshake message and the header, version, etc bytes in the exception message. - assertEquals(failedMessage, 166 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); + assertEquals(failedMessage, 53 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); assertEquals(111, stats.getTxSize().getBytes()); } finally { serviceC.close(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java index 2021c1d7fab08..992b9617fb9d9 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.test.disruption; -import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Nullable; import org.elasticsearch.test.ESTestCase; @@ -115,8 +114,6 @@ protected long getSuspendingTimeoutInMillis() { * but does keep retrying until all threads can be safely paused */ public void testNotBlockingUnsafeStackTraces() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/50047", - JavaVersion.current().equals(JavaVersion.parse("11")) || JavaVersion.current().equals(JavaVersion.parse("12"))); final String nodeName = "test_node"; LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { @Override @@ -149,7 +146,14 @@ protected Pattern[] getUnsafeClasses() { threads[i].start(); } // make sure some threads are under lock - disruption.startDisrupting(); + try { + disruption.startDisrupting(); + } catch (RuntimeException e) { + if (e.getMessage().contains("suspending node threads took too long") && disruption.sawSlowSuspendBug()) { + return; + } + throw new AssertionError(e); + } long first = ops.get(); assertThat(lockedExecutor.lock.isLocked(), equalTo(false)); // no threads should own the lock Thread.sleep(100); @@ -157,6 +161,7 @@ protected Pattern[] getUnsafeClasses() { disruption.stopDisrupting(); assertBusy(() -> assertThat(ops.get(), greaterThan(first))); } finally { + disruption.stopDisrupting(); stop.set(true); for (final Thread thread : threads) { thread.join(); diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc index 1e8f4f1c2f50b..b4e647b4a3c97 100644 --- a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -420,14 +420,14 @@ through either the NOTE: You cannot use <> to grant roles to users authenticating via OpenID Connect. 
-This is an example of a simple role mapping that grants the `kibana_user` role +This is an example of a simple role mapping that grants the `example_role` role to any user who authenticates against the `oidc1` OpenID Connect realm: [source,console] -------------------------------------------------- -PUT /_security/role_mapping/oidc-kibana +PUT /_security/role_mapping/oidc-example { - "roles": [ "kibana_user" ], + "roles": [ "example_role" ], <1> "enabled": true, "rules": { "field": { "realm.name": "oidc1" } @@ -435,6 +435,10 @@ PUT /_security/role_mapping/oidc-kibana } -------------------------------------------------- +<1> The `example_role` role is *not* a builtin Elasticsearch role. +This example assumes that you have created a custom role of your own, with +appropriate access to your <> and +{kibana-ref}/kibana-privileges.html#kibana-feature-privileges[Kibana features]. The user properties that are mapped via the realm configuration are used to process role mapping rules, and these rules determine which roles a user is granted. diff --git a/x-pack/docs/en/security/authentication/realm-chains.asciidoc b/x-pack/docs/en/security/authentication/realm-chains.asciidoc index 314c9dbe491d0..a7d7166239aac 100644 --- a/x-pack/docs/en/security/authentication/realm-chains.asciidoc +++ b/x-pack/docs/en/security/authentication/realm-chains.asciidoc @@ -34,32 +34,25 @@ The following snippet configures a realm chain that includes the `file` and [source,yaml] ---------------------------------------- -xpack.security.authc: - realms: - - file: - type: file +xpack.security.authc.realms: + file.file1: order: 0 - native: - type: native + native.native1: order: 1 - ldap1: - type: ldap + ldap.ldap1: order: 2 enabled: false url: 'url_to_ldap1' ... - ldap2: - type: ldap + ldap.ldap2: order: 3 url: 'url_to_ldap2' ... - ad1: - type: active_directory + active_directory.ad1: order: 4 url: 'url_to_ad' ---------------------------------------- diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 0776d314dfae5..c100ef161c755 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -639,14 +639,14 @@ through either the NOTE: You cannot use <> to grant roles to users authenticating via SAML. -This is an example of a simple role mapping that grants the `kibana_user` role +This is an example of a simple role mapping that grants the `example_role` role to any user who authenticates against the `saml1` realm: [source,console] -------------------------------------------------- -PUT /_security/role_mapping/saml-kibana +PUT /_security/role_mapping/saml-example { - "roles": [ "kibana_user" ], + "roles": [ "example_role" ], <1> "enabled": true, "rules": { "field": { "realm.name": "saml1" } @@ -654,6 +654,10 @@ PUT /_security/role_mapping/saml-kibana } -------------------------------------------------- +<1> The `example_role` role is *not* a builtin Elasticsearch role. +This example assumes that you have created a custom role of your own, with +appropriate access to your <> and +{kibana-ref}/kibana-privileges.html#kibana-feature-privileges[Kibana features]. The attributes that are mapped via the realm configuration are used to process role mapping rules, and these rules determine which roles a user is granted. 
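The same role mapping can also be created programmatically. A hedged sketch using the low-level REST client (the same Request/Response classes used by ESRestTestCase elsewhere in this change); the client instance itself is assumed to exist:

[source,java]
----
// Creates the saml-example role mapping shown above through the low-level REST client.
// "client" is assumed to be an already-built org.elasticsearch.client.RestClient.
Request request = new Request("PUT", "/_security/role_mapping/saml-example");
request.setJsonEntity(
    "{ \"roles\": [ \"example_role\" ], \"enabled\": true, " +
    "\"rules\": { \"field\": { \"realm.name\": \"saml1\" } } }");
Response response = client.performRequest(request);
----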
diff --git a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc index 55d12709124f4..60043e62f94ed 100644 --- a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc @@ -72,10 +72,12 @@ NOTE: This role does *not* provide the ability to create indices; those privileg must be defined in a separate role. [[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: -Grants access to the {kib} Dashboard and read-only permissions to Kibana. -This role does not have access to editing tools in {kib}. For more -information, see -{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. +(This role is deprecated, please use +{kibana-ref}/kibana-privileges.html#kibana-feature-privileges[{kib} feature privileges] +instead). +Grants read-only access to the {kib} Dashboard in every +{kibana-ref}/xpack-spaces.html[space in {kib}]. +This role does not have access to editing tools in {kib}. [[built-in-roles-kibana-system]] `kibana_system` :: Grants access necessary for the {kib} system user to read from and write to the @@ -87,9 +89,15 @@ see {kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. NOTE: This role should not be assigned to users as the granted permissions may change between releases. +[[built-in-roles-kibana-admin]] `kibana_admin`:: +Grants access to all features in {kib}. For more information on {kib} authorization, +see {kibana-ref}/xpack-security-authorization.html[Kibana authorization]. + [[built-in-roles-kibana-user]] `kibana_user`:: -Grants access to all features in {kib}. For more information on Kibana authorization, -see {kibana-ref}/xpack-security-authorization.html[Kibana Authorization]. +(This role is deprecated, please use the +<> role instead.) +Grants access to all features in {kib}. For more information on {kib} authorization, +see {kibana-ref}/xpack-security-authorization.html[Kibana authorization]. [[built-in-roles-logstash-admin]] `logstash_admin` :: Grants access to the `.logstash*` indices for managing configurations. @@ -127,7 +135,8 @@ Grants the minimum privileges required for any user of {monitoring} other than t required to use {kib}. This role grants access to the monitoring indices and grants privileges necessary for reading basic cluster information. This role also includes all {kibana-ref}/kibana-privileges.html[Kibana privileges] for the {stack-monitor-features}. -Monitoring users should also be assigned the `kibana_user` role. +Monitoring users should also be assigned the `kibana_admin` role, or another role +with {kibana-ref}/xpack-security-authorization.html[access to the {kib} instance]. [[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: Grants the minimum privileges required to write data into the monitoring indices @@ -140,9 +149,10 @@ Grants the minimum privileges required to collect monitoring data for the {stack [[built-in-roles-reporting-user]] `reporting_user`:: Grants the specific privileges required for users of {reporting} other than those required to use {kib}. This role grants access to the reporting indices; each -user has access to only their own reports. Reporting users should also be -assigned the `kibana_user` role and a role that grants them access to the data -that will be used to generate reports. +user has access to only their own reports. 
+Reporting users should also be assigned additional roles that grant +{kibana-ref}/xpack-security-authorization.html[access to {kib}] as well as read +access to the <> that will be used to generate reports. [[built-in-roles-snapshot-user]] `snapshot_user`:: Grants the necessary privileges to create snapshots of **all** the indices and diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc index 95e5d188f0084..f9bcd86889c17 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc @@ -31,8 +31,9 @@ NOTE: If you configure the local cluster as another remote in {es}, the `logstash_reader` role on your local cluster also needs to grant the `read_cross_cluster` privilege. -. Assign your {kib} users the `kibana_user` role and your `logstash_reader` -role. +. Assign your {kib} users a role that grants +{kibana-ref}/xpack-security-authorization.html[access to {kib}] +as well as your `logstash_reader` role. . On the remote cluster, create a `logstash_reader` role that grants the `read_cross_cluster` privilege and `read` and `view_index_metadata` privileges diff --git a/x-pack/docs/en/security/get-started-security.asciidoc b/x-pack/docs/en/security/get-started-security.asciidoc index dca72e6e1d4ba..82f443016021f 100644 --- a/x-pack/docs/en/security/get-started-security.asciidoc +++ b/x-pack/docs/en/security/get-started-security.asciidoc @@ -168,15 +168,16 @@ Select a role to see more information about its privileges. For example, select the `kibana_system` role to see its list of cluster and index privileges. To learn more, see <>. -Let's assign the `kibana_user` role to your user. Go back to the -*Management / Security / Users* page and select your user. Add the `kibana_user` +Let's assign the `kibana_admin` role to your user. Go back to the +*Management / Security / Users* page and select your user. Add the `kibana_admin` role and save the change. For example: [role="screenshot"] image::security/images/assign-role.jpg["Assigning a role to a user in Kibana"] -This user now has access to all features in {kib}. For more information about granting -access to Kibana see {kibana-ref}/xpack-security-authorization.html[Kibana Authorization]. +This user now has administrative access to all features in {kib}. +For more information about granting access to Kibana see +{kibana-ref}/xpack-security-authorization.html[Kibana authorization]. 
If you completed all of the steps in {stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}], you should diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index e140e622d304c..6e0a8c6efb377 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -69,14 +69,14 @@ PUT _watcher/watch/error_logs_alert } }, "condition" : { - "compare" : { "ctx.payload.hits.total.value" : { "gt" : 5 }} + "compare" : { "ctx.payload.hits.total" : { "gt" : 5 }} }, "actions" : { "email_administrator" : { "throttle_period": "15m", <1> "email" : { <2> "to" : "sys.admino@host.domain", - "subject" : "Encountered {{ctx.payload.hits.total.value}} errors", + "subject" : "Encountered {{ctx.payload.hits.total}} errors", "body" : "Too many error in the system, see attached data", "attachments" : { "attached_data" : { @@ -119,14 +119,14 @@ PUT _watcher/watch/log_event_watch } }, "condition" : { - "compare" : { "ctx.payload.hits.total.value" : { "gt" : 5 }} + "compare" : { "ctx.payload.hits.total" : { "gt" : 5 }} }, "throttle_period" : "15m", <1> "actions" : { "email_administrator" : { "email" : { "to" : "sys.admino@host.domain", - "subject" : "Encountered {{ctx.payload.hits.total.value}} errors", + "subject" : "Encountered {{ctx.payload.hits.total}} errors", "body" : "Too many error in the system, see attached data", "attachments" : { "attached_data" : { @@ -144,7 +144,7 @@ PUT _watcher/watch/log_event_watch "host" : "pager.service.domain", "port" : 1234, "path" : "/{{watch_id}}", - "body" : "Encountered {{ctx.payload.hits.total.value}} errors" + "body" : "Encountered {{ctx.payload.hits.total}} errors" } } } @@ -265,13 +265,13 @@ PUT _watcher/watch/log_event_watch } }, "condition" : { - "compare" : { "ctx.payload.hits.total.value" : { "gt" : 0 } } + "compare" : { "ctx.payload.hits.total" : { "gt" : 0 } } }, "actions" : { "email_administrator" : { "email" : { "to" : "sys.admino@host.domain", - "subject" : "Encountered {{ctx.payload.hits.total.value}} errors", + "subject" : "Encountered {{ctx.payload.hits.total}} errors", "body" : "Too many error in the system, see attached data", "attachments" : { "attached_data" : { @@ -285,14 +285,14 @@ PUT _watcher/watch/log_event_watch }, "notify_pager" : { "condition": { <1> - "compare" : { "ctx.payload.hits.total.value" : { "gt" : 5 } } + "compare" : { "ctx.payload.hits.total" : { "gt" : 5 } } }, "webhook" : { "method" : "POST", "host" : "pager.service.domain", "port" : 1234, "path" : "/{{watch_id}}", - "body" : "Encountered {{ctx.payload.hits.total.value}} errors" + "body" : "Encountered {{ctx.payload.hits.total}} errors" } } } diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index adda53b810d15..373904af0669b 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -31,7 +31,7 @@ the watch payload in the email body: "email" : { <2> "to" : "username@example.org", <3> "subject" : "Watcher Notification", <4> - "body" : "{{ctx.payload.hits.total.value}} error logs found" <5> + "body" : "{{ctx.payload.hits.total}} error logs found" <5> } } } diff --git a/x-pack/docs/en/watcher/condition/compare.asciidoc b/x-pack/docs/en/watcher/condition/compare.asciidoc index f6fd06a11beec..466cbe05ef659 100644 --- a/x-pack/docs/en/watcher/condition/compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/compare.asciidoc @@ -44,7 +44,7 @@ condition returns `true` if the number of the 
total hits in the { "condition" : { "compare" : { - "ctx.payload.hits.total.value" : { <1> + "ctx.payload.hits.total" : { <1> "gte" : 5 <2> } } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java index 98f5600b245e7..e5b4b2ce0bf07 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java @@ -131,7 +131,7 @@ public void testParentValidations() throws IOException { // Date Histogram aggBuilders.clear(); aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); - parent = new DateHistogramAggregatorFactory("name", valuesSource, 0L, + parent = new DateHistogramAggregatorFactory("name", valuesSource, mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), mock(ExtendedBounds.class), mock(QueryShardContext.class), mock(AggregatorFactory.class), new AggregatorFactories.Builder(), Collections.emptyMap()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 20c098ee7f82a..19ee64290133c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.IndexCommit; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -231,6 +232,7 @@ public void getRepositoryData(ActionListener listener) { Map copiedSnapshotIds = new HashMap<>(); Map snapshotStates = new HashMap<>(copiedSnapshotIds.size()); + Map snapshotVersions = new HashMap<>(copiedSnapshotIds.size()); Map> indexSnapshots = new HashMap<>(copiedSnapshotIds.size()); ImmutableOpenMap remoteIndices = remoteMetaData.getIndices(); @@ -239,10 +241,11 @@ public void getRepositoryData(ActionListener listener) { SnapshotId snapshotId = new SnapshotId(LATEST, LATEST); copiedSnapshotIds.put(indexName, snapshotId); snapshotStates.put(indexName, SnapshotState.SUCCESS); + snapshotVersions.put(indexName, Version.CURRENT); Index index = remoteIndices.get(indexName).getIndex(); indexSnapshots.put(new IndexId(indexName, index.getUUID()), Collections.singleton(snapshotId)); } - return new RepositoryData(1, copiedSnapshotIds, snapshotStates, indexSnapshots, ShardGenerations.EMPTY); + return new RepositoryData(1, copiedSnapshotIds, snapshotStates, snapshotVersions, indexSnapshots, ShardGenerations.EMPTY); }); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 004c9ff987764..210beaaedecaf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -113,13 +113,19 @@ static boolean isBasic(String typeName) { static boolean isTrial(String typeName) { return TRIAL.getTypeName().equals(typeName); } + + static boolean isEnterprise(String typeName) { + return ENTERPRISE.getTypeName().equals(typeName); + } + } public static final int VERSION_START = 1; public static final int VERSION_NO_FEATURE_TYPE = 2; public static final int VERSION_START_DATE = 3; public static final int VERSION_CRYPTO_ALGORITHMS = 4; - public static final int VERSION_CURRENT = VERSION_CRYPTO_ALGORITHMS; + public static final int VERSION_ENTERPRISE = 5; + public static final int VERSION_CURRENT = VERSION_ENTERPRISE; /** * XContent param name to deserialize license(s) with @@ -153,13 +159,14 @@ static boolean isTrial(String typeName) { private final long expiryDate; private final long startDate; private final int maxNodes; + private final int maxResourceUnits; private final OperationMode operationMode; /** * Decouples operation mode of a license from the license type value. *
<p>
    * Note: The mode indicates features that should be made available, but it does not indicate whether the license is active! - * + *
<p>
    * The id byte is used for ordering operation modes */ public enum OperationMode { @@ -176,13 +183,16 @@ public enum OperationMode { this.id = id; } - /** Returns non-zero positive number when opMode1 is greater than opMode2 */ + /** + * Returns non-zero positive number when opMode1 is greater than opMode2 + */ public static int compare(OperationMode opMode1, OperationMode opMode2) { return Integer.compare(opMode1.id, opMode2.id); } /** * Determine the operating mode for a license type + * * @see LicenseType#resolve(License) * @see #parse(String) */ @@ -211,6 +221,7 @@ public static OperationMode resolve(LicenseType type) { * Parses an {@code OperatingMode} from a String. * The string must name an operating mode, and not a licensing level (that is, it cannot parse old style license levels * such as "dev" or "silver"). + * * @see #description() */ public static OperationMode parse(String mode) { @@ -227,8 +238,8 @@ public String description() { } } - private License(int version, String uid, String issuer, String issuedTo, long issueDate, String type, - String subscriptionType, String feature, String signature, long expiryDate, int maxNodes, long startDate) { + private License(int version, String uid, String issuer, String issuedTo, long issueDate, String type, String subscriptionType, + String feature, String signature, long expiryDate, int maxNodes, int maxResourceUnits, long startDate) { this.version = version; this.uid = uid; this.issuer = issuer; @@ -246,6 +257,7 @@ private License(int version, String uid, String issuer, String issuedTo, long is this.expiryDate = expiryDate; } this.maxNodes = maxNodes; + this.maxResourceUnits = maxResourceUnits; this.startDate = startDate; this.operationMode = OperationMode.resolve(LicenseType.resolve(this)); validate(); @@ -294,12 +306,21 @@ public long expiryDate() { } /** - * @return the maximum number of nodes this license has been issued for + * @return the maximum number of nodes this license has been issued for, or {@code -1} if this license is not node based. */ public int maxNodes() { return maxNodes; } + /** + * @return the maximum number of "resource units" this license has been issued for, or {@code -1} if this license is not resource based. + * A "resource unit" is a measure of computing power (RAM/CPU), the definition of which is maintained outside of the license format, + * or this class. 
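To make the split between node-based and resource-based limits concrete, here is a hedged sketch of building one license of each kind. All values are invented and only builder methods visible in this change are used; the constructor validation described below is what rejects a mismatched limit:

[source,java]
----
// Sketch only: enterprise licenses carry maxResourceUnits, every other type keeps maxNodes.
// The License constructor runs validateLimits(type, maxNodes, maxResourceUnits), so setting
// the wrong limit for the type throws IllegalStateException.
long now = System.currentTimeMillis();
License enterprise = License.builder()
    .uid(UUID.randomUUID().toString())
    .version(License.VERSION_CURRENT)              // enterprise requires the new license version
    .type("enterprise")
    .issuedTo("example customer")
    .issuer("API")
    .issueDate(now)
    .expiryDate(now + TimeUnit.DAYS.toMillis(30))
    .maxResourceUnits(50)                          // required for enterprise; maxNodes stays unset
    .build();

License gold = License.builder()
    .uid(UUID.randomUUID().toString())
    .version(License.VERSION_CURRENT)
    .type("gold")
    .issuedTo("example customer")
    .issuer("API")
    .issueDate(now)
    .expiryDate(now + TimeUnit.DAYS.toMillis(30))
    .maxNodes(5)                                   // node-based types must still set maxNodes
    .build();
----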
+ */ + public int maxResourceUnits() { + return maxResourceUnits; + } + /** * @return a string representing the entity this licenses has been issued to */ @@ -386,20 +407,39 @@ private void validate() { throw new IllegalStateException("uid can not be null"); } else if (feature == null && version == VERSION_START) { throw new IllegalStateException("feature can not be null"); - } else if (maxNodes == -1) { - throw new IllegalStateException("maxNodes has to be set"); } else if (expiryDate == -1) { throw new IllegalStateException("expiryDate has to be set"); } else if (expiryDate == LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS && LicenseType.isBasic(type) == false) { throw new IllegalStateException("only basic licenses are allowed to have no expiration"); } + + if (LicenseType.isEnterprise(type) && version < VERSION_ENTERPRISE) { + throw new IllegalStateException("license type [" + type + "] is not a valid for version [" + version + "] licenses"); + } + validateLimits(type, maxNodes, maxResourceUnits); + } + + private static void validateLimits(String type, int maxNodes, int maxResourceUnits) { + if (LicenseType.isEnterprise(type)) { + if (maxResourceUnits == -1) { + throw new IllegalStateException("maxResourceUnits must be set for enterprise licenses (type=[" + type + "])"); + } else if (maxNodes != -1) { + throw new IllegalStateException("maxNodes may not be set for enterprise licenses (type=[" + type + "])"); + } + } else { + if (maxNodes == -1) { + throw new IllegalStateException("maxNodes has to be set"); + } else if (maxResourceUnits != -1) { + throw new IllegalStateException("maxResourceUnits may only be set for enterprise licenses (not permitted for type=[" + + type + "])"); + } + } } public static License readLicense(StreamInput in) throws IOException { int version = in.readVInt(); // Version for future extensibility if (version > VERSION_CURRENT) { - throw new ElasticsearchException("Unknown license version found, please upgrade all nodes to the latest elasticsearch-license" + - " plugin"); + throw new ElasticsearchException("Unknown license version found, please upgrade all nodes to the latest elasticsearch release"); } Builder builder = builder(); builder.version(version); @@ -414,6 +454,9 @@ public static License readLicense(StreamInput in) throws IOException { } builder.expiryDate(in.readLong()); builder.maxNodes(in.readInt()); + if (version >= VERSION_ENTERPRISE) { + builder.maxResourceUnits(in.readInt()); + } builder.issuedTo(in.readString()); builder.issuer(in.readString()); builder.signature(in.readOptionalString()); @@ -436,6 +479,9 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeLong(expiryDate); out.writeInt(maxNodes); + if (version >= VERSION_ENTERPRISE) { + out.writeInt(maxResourceUnits); + } out.writeString(issuedTo); out.writeString(issuer); out.writeOptionalString(signature); @@ -496,7 +542,14 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t if (expiryDate != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { builder.timeField(Fields.EXPIRY_DATE_IN_MILLIS, Fields.EXPIRY_DATE, expiryDate); } - builder.field(Fields.MAX_NODES, maxNodes); + + if (version >= VERSION_ENTERPRISE) { + builder.field(Fields.MAX_NODES, maxNodes == -1 ? null : maxNodes); + builder.field(Fields.MAX_RESOURCE_UNITS, maxResourceUnits == -1 ? 
null : maxResourceUnits); + } else { + builder.field(Fields.MAX_NODES, maxNodes); + } + builder.field(Fields.ISSUED_TO, issuedTo); builder.field(Fields.ISSUER, issuer); if (!licenseSpecMode && !restViewMode && signature != null) { @@ -541,6 +594,8 @@ public static License fromXContent(XContentParser parser) throws IOException { builder.startDate(parser.longValue()); } else if (Fields.MAX_NODES.equals(currentFieldName)) { builder.maxNodes(parser.intValue()); + } else if (Fields.MAX_RESOURCE_UNITS.equals(currentFieldName)) { + builder.maxResourceUnits(parser.intValue()); } else if (Fields.ISSUED_TO.equals(currentFieldName)) { builder.issuedTo(parser.text()); } else if (Fields.ISSUER.equals(currentFieldName)) { @@ -583,7 +638,7 @@ public static License fromXContent(XContentParser parser) throws IOException { throw new ElasticsearchException("malformed signature for license [" + builder.uid + "]"); } else if (version > VERSION_CURRENT) { throw new ElasticsearchException("Unknown license version found, please upgrade all nodes to the latest " + - "elasticsearch-license plugin"); + "elasticsearch-license plugin"); } // signature version is the source of truth builder.version(version); @@ -615,8 +670,7 @@ public static License fromSource(BytesReference bytes, XContentType xContentType // EMPTY is safe here because we don't call namedObject try (InputStream byteStream = bytes.streamInput(); XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, byteStream)) - { + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, byteStream)) { License license = null; if (parser.nextToken() == XContentParser.Token.START_OBJECT) { if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { @@ -665,7 +719,7 @@ public boolean equals(Object o) { if (issueDate != license.issueDate) return false; if (expiryDate != license.expiryDate) return false; - if (startDate!= license.startDate) return false; + if (startDate != license.startDate) return false; if (maxNodes != license.maxNodes) return false; if (version != license.version) return false; if (uid != null ? !uid.equals(license.uid) : license.uid != null) return false; @@ -690,7 +744,7 @@ public int hashCode() { result = 31 * result + (feature != null ? feature.hashCode() : 0); result = 31 * result + (signature != null ? 
signature.hashCode() : 0); result = 31 * result + (int) (expiryDate ^ (expiryDate >>> 32)); - result = 31 * result + (int) (startDate ^ (startDate>>> 32)); + result = 31 * result + (int) (startDate ^ (startDate >>> 32)); result = 31 * result + maxNodes; result = 31 * result + version; return result; @@ -709,6 +763,7 @@ public static final class Fields { public static final String START_DATE_IN_MILLIS = "start_date_in_millis"; public static final String START_DATE = "start_date"; public static final String MAX_NODES = "max_nodes"; + public static final String MAX_RESOURCE_UNITS = "max_resource_units"; public static final String ISSUED_TO = "issued_to"; public static final String ISSUER = "issuer"; public static final String VERSION = "version"; @@ -752,6 +807,7 @@ public static class Builder { private long expiryDate = -1; private long startDate = -1; private int maxNodes = -1; + private int maxResourceUnits = -1; public Builder uid(String uid) { this.uid = uid; @@ -807,6 +863,11 @@ public Builder maxNodes(int maxNodes) { return this; } + public Builder maxResourceUnits(int maxUnits) { + this.maxResourceUnits = maxUnits; + return this; + } + public Builder signature(String signature) { if (signature != null) { this.signature = signature; @@ -821,17 +882,18 @@ public Builder startDate(long startDate) { public Builder fromLicenseSpec(License license, String signature) { return uid(license.uid()) - .version(license.version()) - .issuedTo(license.issuedTo()) - .issueDate(license.issueDate()) - .startDate(license.startDate()) - .type(license.type()) - .subscriptionType(license.subscriptionType) - .feature(license.feature) - .maxNodes(license.maxNodes()) - .expiryDate(license.expiryDate()) - .issuer(license.issuer()) - .signature(signature); + .version(license.version()) + .issuedTo(license.issuedTo()) + .issueDate(license.issueDate()) + .startDate(license.startDate()) + .type(license.type()) + .subscriptionType(license.subscriptionType) + .feature(license.feature) + .maxNodes(license.maxNodes()) + .maxResourceUnits(license.maxResourceUnits()) + .expiryDate(license.expiryDate()) + .issuer(license.issuer()) + .signature(signature); } /** @@ -840,15 +902,15 @@ public Builder fromLicenseSpec(License license, String signature) { */ public Builder fromPre20LicenseSpec(License pre20License) { return uid(pre20License.uid()) - .issuedTo(pre20License.issuedTo()) - .issueDate(pre20License.issueDate()) - .maxNodes(pre20License.maxNodes()) - .expiryDate(pre20License.expiryDate()); + .issuedTo(pre20License.issuedTo()) + .issueDate(pre20License.issueDate()) + .maxNodes(pre20License.maxNodes()) + .expiryDate(pre20License.expiryDate()); } public License build() { return new License(version, uid, issuer, issuedTo, issueDate, type, - subscriptionType, feature, signature, expiryDate, maxNodes, startDate); + subscriptionType, feature, signature, expiryDate, maxNodes, maxResourceUnits, startDate); } public Builder validate() { @@ -864,11 +926,10 @@ public Builder validate() { throw new IllegalStateException("uid can not be null"); } else if (signature == null) { throw new IllegalStateException("signature can not be null"); - } else if (maxNodes == -1) { - throw new IllegalStateException("maxNodes has to be set"); } else if (expiryDate == -1) { throw new IllegalStateException("expiryDate has to be set"); } + validateLimits(type, maxNodes, maxResourceUnits); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 8168445f8c3ad..515f8462f98aa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -121,6 +121,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste * Max number of nodes licensed by generated trial license */ static final int SELF_GENERATED_LICENSE_MAX_NODES = 1000; + static final int SELF_GENERATED_LICENSE_MAX_RESOURCE_UNITS = SELF_GENERATED_LICENSE_MAX_NODES; public static final String LICENSE_JOB = "licenseJob"; @@ -291,11 +292,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { } private static boolean licenseIsCompatible(License license, Version version) { - if (License.LicenseType.ENTERPRISE.getTypeName().equalsIgnoreCase(license.type())) { - return version.onOrAfter(Version.V_7_6_0); - } else { - return true; - } + final int maxVersion = LicenseUtils.getMaxLicenseVersion(version); + return license.version() <= maxVersion; } private boolean isAllowedLicenseType(License.LicenseType type) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index dd4cb1ae6a589..66fd005614017 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -6,6 +6,7 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.license.License.LicenseType; import org.elasticsearch.rest.RestStatus; @@ -46,18 +47,26 @@ public static boolean licenseNeedsExtended(License license) { * recreated with the new key */ public static boolean signatureNeedsUpdate(License license, DiscoveryNodes currentNodes) { - assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; + assert License.VERSION_ENTERPRISE == License.VERSION_CURRENT : "update this method when adding a new version"; String typeName = license.type(); return (LicenseType.isBasic(typeName) || LicenseType.isTrial(typeName)) && // only upgrade signature when all nodes are ready to deserialize the new signature (license.version() < License.VERSION_CRYPTO_ALGORITHMS && - compatibleLicenseVersion(currentNodes) == License.VERSION_CRYPTO_ALGORITHMS + compatibleLicenseVersion(currentNodes) >= License.VERSION_CRYPTO_ALGORITHMS ); } public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) { - assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; - return License.VERSION_CRYPTO_ALGORITHMS; + return getMaxLicenseVersion(currentNodes.getMinNodeVersion()); + } + + public static int getMaxLicenseVersion(Version version) { + if (version != null && version.before(Version.V_7_6_0)) { + return License.VERSION_CRYPTO_ALGORITHMS; + } else { + assert License.VERSION_ENTERPRISE == License.VERSION_CURRENT : "update this method when adding a new version"; + return License.VERSION_ENTERPRISE; + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 
98fb6115710e3..c2d23ebf47aa7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -75,10 +75,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { License.Builder specBuilder = License.builder() .uid(UUID.randomUUID().toString()) .issuedTo(clusterName) - .maxNodes(LicenseService.SELF_GENERATED_LICENSE_MAX_NODES) .issueDate(issueDate) .type(request.getType()) .expiryDate(expiryDate); + if (License.LicenseType.isEnterprise(request.getType())) { + specBuilder.maxResourceUnits(LicenseService.SELF_GENERATED_LICENSE_MAX_RESOURCE_UNITS); + } else { + specBuilder.maxNodes(LicenseService.SELF_GENERATED_LICENSE_MAX_NODES); + } License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes()); LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT); mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 8798c41406b31..3c7a0c7615945 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -116,6 +116,7 @@ import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; @@ -235,6 +236,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.oss.IndexFeatureSetUsage; import java.util.ArrayList; import java.util.Arrays; @@ -342,6 +344,7 @@ public List> getClientActions() { GetTrainedModelsAction.INSTANCE, DeleteTrainedModelAction.INSTANCE, GetTrainedModelsStatsAction.INSTANCE, + PutTrainedModelAction.INSTANCE, // security ClearRealmCacheAction.INSTANCE, ClearRolesCacheAction.INSTANCE, @@ -559,7 +562,8 @@ public List getNamedWriteables() { // Spatial new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), // data science - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX, IndexFeatureSetUsage::new) ).stream(), MlEvaluationNamedXContentProvider.getNamedWriteables().stream() ).collect(toList()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 8a74272429f87..3a836931b45a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -53,6 +53,8 @@ public final class XPackField { public static final String ANALYTICS = "analytics"; /** Name constant for the enrich plugin. */ public static final String ENRICH = "enrich"; + /** Name constant for indices. */ + public static final String INDEX = "index"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 9a2d5fd65eb6d..7a0307308b95a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -59,6 +59,7 @@ import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction; @@ -68,6 +69,7 @@ import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.oss.IndexUsageTransportAction; import java.nio.file.Files; import java.nio.file.Path; @@ -255,6 +257,7 @@ public Collection createComponents(Client client, ClusterService cluster actions.add(new ActionHandler<>(XPackUsageAction.INSTANCE, getUsageAction())); actions.addAll(licensing.getActions()); actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class)); + actions.add(new ActionHandler<>(XPackUsageFeatureAction.INDEX, IndexUsageTransportAction.class)); return actions; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index fe43f9661488a..15e18ef38a4f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -40,10 +40,11 @@ public class XPackUsageFeatureAction extends ActionType ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, SNAPSHOT_LIFECYCLE, CCR, - TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS + TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS, INDEX ); private XPackUsageFeatureAction(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java new file mode 100644 index 0000000000000..8821e47023ebb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.io.IOException; +import java.util.Objects; + + +public class PutTrainedModelAction extends ActionType { + + public static final PutTrainedModelAction INSTANCE = new PutTrainedModelAction(); + public static final String NAME = "cluster:admin/xpack/ml/inference/put"; + private PutTrainedModelAction() { + super(NAME, Response::new); + } + + public static class Request extends AcknowledgedRequest { + + public static Request parseRequest(String modelId, XContentParser parser) { + TrainedModelConfig.Builder builder = TrainedModelConfig.STRICT_PARSER.apply(parser, null); + + if (builder.getModelId() == null) { + builder.setModelId(modelId).build(); + } else if (!Strings.isNullOrEmpty(modelId) && !modelId.equals(builder.getModelId())) { + // If we have model_id in both URI and body, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, + TrainedModelConfig.MODEL_ID.getPreferredName(), + builder.getModelId(), + modelId)); + } + // Validations are done against the builder so we can build the full config object. + // This allows us to not worry about serializing a builder class between nodes. 
+ return new Request(builder.validate(true).build()); + } + + private final TrainedModelConfig config; + + public Request(TrainedModelConfig config) { + this.config = config; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.config = new TrainedModelConfig(in); + } + + public TrainedModelConfig getTrainedModelConfig() { + return config; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + config.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(config, request.config); + } + + @Override + public int hashCode() { + return Objects.hash(config); + } + + @Override + public final String toString() { + return Strings.toString(config); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final TrainedModelConfig trainedModelConfig; + + public Response(TrainedModelConfig trainedModelConfig) { + this.trainedModelConfig = trainedModelConfig; + } + + public Response(StreamInput in) throws IOException { + super(in); + trainedModelConfig = new TrainedModelConfig(in); + } + + public TrainedModelConfig getResponse() { + return trainedModelConfig; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + trainedModelConfig.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return trainedModelConfig.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(trainedModelConfig, response.trainedModelConfig); + } + + @Override + public int hashCode() { + return Objects.hash(trainedModelConfig); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParams.java index 0f06b08444f53..e0890c21377ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParams.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.dataframe.analyses; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -34,6 +35,7 @@ public class BoostedTreeParams implements ToXContentFragment, Writeable { public static final ParseField ETA = new ParseField("eta"); public static final ParseField MAXIMUM_NUMBER_TREES = new ParseField("maximum_number_trees"); public static final ParseField FEATURE_BAG_FRACTION = new ParseField("feature_bag_fraction"); + public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); static void declareFields(AbstractObjectParser parser) { parser.declareDouble(optionalConstructorArg(), LAMBDA); @@ -41,6 +43,7 @@ static void declareFields(AbstractObjectParser parser) { parser.declareDouble(optionalConstructorArg(), ETA); 
parser.declareInt(optionalConstructorArg(), MAXIMUM_NUMBER_TREES); parser.declareDouble(optionalConstructorArg(), FEATURE_BAG_FRACTION); + parser.declareInt(optionalConstructorArg(), NUM_TOP_FEATURE_IMPORTANCE_VALUES); } private final Double lambda; @@ -48,12 +51,14 @@ static void declareFields(AbstractObjectParser parser) { private final Double eta; private final Integer maximumNumberTrees; private final Double featureBagFraction; + private final Integer numTopFeatureImportanceValues; public BoostedTreeParams(@Nullable Double lambda, - @Nullable Double gamma, - @Nullable Double eta, - @Nullable Integer maximumNumberTrees, - @Nullable Double featureBagFraction) { + @Nullable Double gamma, + @Nullable Double eta, + @Nullable Integer maximumNumberTrees, + @Nullable Double featureBagFraction, + @Nullable Integer numTopFeatureImportanceValues) { if (lambda != null && lambda < 0) { throw ExceptionsHelper.badRequestException("[{}] must be a non-negative double", LAMBDA.getPreferredName()); } @@ -69,15 +74,16 @@ public BoostedTreeParams(@Nullable Double lambda, if (featureBagFraction != null && (featureBagFraction <= 0 || featureBagFraction > 1.0)) { throw ExceptionsHelper.badRequestException("[{}] must be a double in (0, 1]", FEATURE_BAG_FRACTION.getPreferredName()); } + if (numTopFeatureImportanceValues != null && numTopFeatureImportanceValues < 0) { + throw ExceptionsHelper.badRequestException("[{}] must be a non-negative integer", + NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName()); + } this.lambda = lambda; this.gamma = gamma; this.eta = eta; this.maximumNumberTrees = maximumNumberTrees; this.featureBagFraction = featureBagFraction; - } - - public BoostedTreeParams() { - this(null, null, null, null, null); + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; } BoostedTreeParams(StreamInput in) throws IOException { @@ -86,6 +92,11 @@ public BoostedTreeParams() { eta = in.readOptionalDouble(); maximumNumberTrees = in.readOptionalVInt(); featureBagFraction = in.readOptionalDouble(); + if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + numTopFeatureImportanceValues = in.readOptionalInt(); + } else { + numTopFeatureImportanceValues = null; + } } @Override @@ -95,6 +106,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(eta); out.writeOptionalVInt(maximumNumberTrees); out.writeOptionalDouble(featureBagFraction); + if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + out.writeOptionalInt(numTopFeatureImportanceValues); + } } @Override @@ -114,6 +128,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (featureBagFraction != null) { builder.field(FEATURE_BAG_FRACTION.getPreferredName(), featureBagFraction); } + if (numTopFeatureImportanceValues != null) { + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } return builder; } @@ -134,6 +151,9 @@ Map getParams() { if (featureBagFraction != null) { params.put(FEATURE_BAG_FRACTION.getPreferredName(), featureBagFraction); } + if (numTopFeatureImportanceValues != null) { + params.put(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } return params; } @@ -146,11 +166,62 @@ public boolean equals(Object o) { && Objects.equals(gamma, that.gamma) && Objects.equals(eta, that.eta) && Objects.equals(maximumNumberTrees, that.maximumNumberTrees) - && Objects.equals(featureBagFraction, that.featureBagFraction); + && Objects.equals(featureBagFraction, that.featureBagFraction) + && 
Objects.equals(numTopFeatureImportanceValues, that.numTopFeatureImportanceValues); } @Override public int hashCode() { - return Objects.hash(lambda, gamma, eta, maximumNumberTrees, featureBagFraction); + return Objects.hash(lambda, gamma, eta, maximumNumberTrees, featureBagFraction, numTopFeatureImportanceValues); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + private Double lambda; + private Double gamma; + private Double eta; + private Integer maximumNumberTrees; + private Double featureBagFraction; + private Integer numTopFeatureImportanceValues; + + private Builder() {} + + public Builder setLambda(Double lambda) { + this.lambda = lambda; + return this; + } + + public Builder setGamma(Double gamma) { + this.gamma = gamma; + return this; + } + + public Builder setEta(Double eta) { + this.eta = eta; + return this; + } + + public Builder setMaximumNumberTrees(Integer maximumNumberTrees) { + this.maximumNumberTrees = maximumNumberTrees; + return this; + } + + public Builder setFeatureBagFraction(Double featureBagFraction) { + this.featureBagFraction = featureBagFraction; + return this; + } + + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + + public BoostedTreeParams build() { + return new BoostedTreeParams(lambda, gamma, eta, maximumNumberTrees, featureBagFraction, numTopFeatureImportanceValues); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java index 0e68d13895e25..47a02786f5d07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java @@ -50,11 +50,11 @@ private static ConstructingObjectParser createParser(boole lenient, a -> new Classification( (String) a[0], - new BoostedTreeParams((Double) a[1], (Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5]), - (String) a[6], - (Integer) a[7], - (Double) a[8], - (Long) a[9])); + new BoostedTreeParams((Double) a[1], (Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5], (Integer) a[6]), + (String) a[7], + (Integer) a[8], + (Double) a[9], + (Long) a[10])); parser.declareString(constructorArg(), DEPENDENT_VARIABLE); BoostedTreeParams.declareFields(parser); parser.declareString(optionalConstructorArg(), PREDICTION_FIELD_NAME); @@ -112,7 +112,7 @@ public Classification(String dependentVariable, } public Classification(String dependentVariable) { - this(dependentVariable, new BoostedTreeParams(), null, null, null, null); + this(dependentVariable, BoostedTreeParams.builder().build(), null, null, null, null); } public Classification(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java index fe2927591312a..83174a9aebfe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java @@ -47,10 +47,10 @@ private static ConstructingObjectParser 
createParser(boolean l lenient, a -> new Regression( (String) a[0], - new BoostedTreeParams((Double) a[1], (Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5]), - (String) a[6], - (Double) a[7], - (Long) a[8])); + new BoostedTreeParams((Double) a[1], (Double) a[2], (Double) a[3], (Integer) a[4], (Double) a[5], (Integer) a[6]), + (String) a[7], + (Double) a[8], + (Long) a[9])); parser.declareString(constructorArg(), DEPENDENT_VARIABLE); BoostedTreeParams.declareFields(parser); parser.declareString(optionalConstructorArg(), PREDICTION_FIELD_NAME); @@ -85,7 +85,7 @@ public Regression(String dependentVariable, } public Regression(String dependentVariable) { - this(dependentVariable, new BoostedTreeParams(), null, null, null); + this(dependentVariable, BoostedTreeParams.builder().build(), null, null, null); } public Regression(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java index be4d40efc8501..95589ac8b61fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java @@ -7,6 +7,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -34,6 +35,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.ValidateActions.addValidationError; public class TrainedModelConfig implements ToXContentObject, Writeable { @@ -352,13 +356,31 @@ public static class Builder { private Long estimatedHeapMemory; private Long estimatedOperations; private LazyModelDefinition definition; - private String licenseLevel = License.OperationMode.PLATINUM.description(); + private String licenseLevel; + + public Builder() {} + + public Builder(TrainedModelConfig config) { + this.modelId = config.getModelId(); + this.createdBy = config.getCreatedBy(); + this.version = config.getVersion(); + this.createTime = config.getCreateTime(); + this.definition = config.definition == null ? 
null : new LazyModelDefinition(config.definition); + this.description = config.getDescription(); + this.tags = config.getTags(); + this.metadata = config.getMetadata(); + this.input = config.getInput(); + } public Builder setModelId(String modelId) { this.modelId = modelId; return this; } + public String getModelId() { + return this.modelId; + } + public Builder setCreatedBy(String createdBy) { this.createdBy = createdBy; return this; @@ -466,51 +488,96 @@ public Builder setLicenseLevel(String licenseLevel) { return this; } - // TODO move to REST level instead of here in the builder - public void validate() { - // We require a definition to be available here even though it will be stored in a different doc - ExceptionsHelper.requireNonNull(definition, DEFINITION); - ExceptionsHelper.requireNonNull(modelId, MODEL_ID); + public Builder validate() { + return validate(false); + } - if (MlStrings.isValidId(modelId) == false) { - throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INVALID_ID, MODEL_ID.getPreferredName(), modelId)); + /** + * Runs validations against the builder. + * @return The current builder object if validations are successful + * @throws ActionRequestValidationException when there are validation failures. + */ + public Builder validate(boolean forCreation) { + // We require a definition to be available here even though it will be stored in a different doc + ActionRequestValidationException validationException = null; + if (definition == null) { + validationException = addValidationError("[" + DEFINITION.getPreferredName() + "] must not be null.", validationException); + } + if (modelId == null) { + validationException = addValidationError("[" + MODEL_ID.getPreferredName() + "] must not be null.", validationException); } - if (MlStrings.hasValidLengthForId(modelId) == false) { - throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.ID_TOO_LONG, - MODEL_ID.getPreferredName(), + if (modelId != null && MlStrings.isValidId(modelId) == false) { + validationException = addValidationError(Messages.getMessage(Messages.INVALID_ID, + TrainedModelConfig.MODEL_ID.getPreferredName(), + modelId), + validationException); + } + if (modelId != null && MlStrings.hasValidLengthForId(modelId) == false) { + validationException = addValidationError(Messages.getMessage(Messages.ID_TOO_LONG, + TrainedModelConfig.MODEL_ID.getPreferredName(), modelId, - MlStrings.ID_LENGTH_LIMIT)); + MlStrings.ID_LENGTH_LIMIT), validationException); + } + List badTags = tags.stream() + .filter(tag -> (MlStrings.isValidId(tag) && MlStrings.hasValidLengthForId(tag)) == false) + .collect(Collectors.toList()); + if (badTags.isEmpty() == false) { + validationException = addValidationError(Messages.getMessage(Messages.INFERENCE_INVALID_TAGS, + badTags, + MlStrings.ID_LENGTH_LIMIT), + validationException); + } + + for(String tag : tags) { + if (tag.equals(modelId)) { + validationException = addValidationError("none of the tags must equal the model_id", validationException); + break; + } + } + if (forCreation) { + validationException = checkIllegalSetting(version, VERSION.getPreferredName(), validationException); + validationException = checkIllegalSetting(createdBy, CREATED_BY.getPreferredName(), validationException); + validationException = checkIllegalSetting(createTime, CREATE_TIME.getPreferredName(), validationException); + validationException = checkIllegalSetting(estimatedHeapMemory, + ESTIMATED_HEAP_MEMORY_USAGE_BYTES.getPreferredName(), + validationException); + 
validationException = checkIllegalSetting(estimatedOperations, + ESTIMATED_OPERATIONS.getPreferredName(), + validationException); + validationException = checkIllegalSetting(licenseLevel, LICENSE_LEVEL.getPreferredName(), validationException); } - checkIllegalSetting(version, VERSION.getPreferredName()); - checkIllegalSetting(createdBy, CREATED_BY.getPreferredName()); - checkIllegalSetting(createTime, CREATE_TIME.getPreferredName()); - checkIllegalSetting(estimatedHeapMemory, ESTIMATED_HEAP_MEMORY_USAGE_BYTES.getPreferredName()); - checkIllegalSetting(estimatedOperations, ESTIMATED_OPERATIONS.getPreferredName()); - checkIllegalSetting(licenseLevel, LICENSE_LEVEL.getPreferredName()); + if (validationException != null) { + throw validationException; + } + + return this; } - private static void checkIllegalSetting(Object value, String setting) { + private static ActionRequestValidationException checkIllegalSetting(Object value, + String setting, + ActionRequestValidationException validationException) { if (value != null) { - throw ExceptionsHelper.badRequestException("illegal to set [{}] at inference model creation", setting); + return addValidationError("illegal to set [" + setting + "] at inference model creation", validationException); } + return validationException; } public TrainedModelConfig build() { return new TrainedModelConfig( modelId, - createdBy, - version, + createdBy == null ? "user" : createdBy, + version == null ? Version.CURRENT : version, description, createTime == null ? Instant.now() : createTime, definition, tags, metadata, input, - estimatedHeapMemory, - estimatedOperations, - licenseLevel); + estimatedHeapMemory == null ? 0 : estimatedHeapMemory, + estimatedOperations == null ? 0 : estimatedOperations, + licenseLevel == null ? License.OperationMode.PLATINUM.description() : licenseLevel); } } @@ -531,6 +598,13 @@ public static LazyModelDefinition fromStreamInput(StreamInput input) throws IOEx return new LazyModelDefinition(input.readString(), null); } + private LazyModelDefinition(LazyModelDefinition definition) { + if (definition != null) { + this.compressedString = definition.compressedString; + this.parsedDefinition = definition.parsedDefinition; + } + } + private LazyModelDefinition(String compressedString, TrainedModelDefinition trainedModelDefinition) { if (compressedString == null && trainedModelDefinition == null) { throw new IllegalArgumentException("unexpected null model definition"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java index e176d9a288568..cf7a8b7d224c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java @@ -179,6 +179,12 @@ public Builder() { this(true); } + public Builder(TrainedModelDefinition definition) { + this(true); + this.preProcessors = new ArrayList<>(definition.getPreProcessors()); + this.trainedModel = definition.trainedModel; + } + public Builder setPreProcessors(List preProcessors) { this.preProcessors = preProcessors; return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java index 
526b37314b56b..39ae4057fd9ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java @@ -110,20 +110,28 @@ public String getWriteableName() { public static class TopClassEntry implements Writeable { - public final ParseField CLASSIFICATION = new ParseField("classification"); - public final ParseField PROBABILITY = new ParseField("probability"); + public final ParseField CLASS_NAME = new ParseField("class_name"); + public final ParseField CLASS_PROBABILITY = new ParseField("class_probability"); + public final ParseField CLASS_SCORE = new ParseField("class_score"); private final String classification; private final double probability; + private final double score; - public TopClassEntry(String classification, Double probability) { - this.classification = ExceptionsHelper.requireNonNull(classification, CLASSIFICATION); - this.probability = ExceptionsHelper.requireNonNull(probability, PROBABILITY); + public TopClassEntry(String classification, double probability) { + this(classification, probability, probability); + } + + public TopClassEntry(String classification, double probability, double score) { + this.classification = ExceptionsHelper.requireNonNull(classification, CLASS_NAME); + this.probability = probability; + this.score = score; } public TopClassEntry(StreamInput in) throws IOException { this.classification = in.readString(); this.probability = in.readDouble(); + this.score = in.readDouble(); } public String getClassification() { @@ -134,10 +142,15 @@ public double getProbability() { return probability; } + public double getScore() { + return score; + } + public Map asValueMap() { - Map map = new HashMap<>(2); - map.put(CLASSIFICATION.getPreferredName(), classification); - map.put(PROBABILITY.getPreferredName(), probability); + Map map = new HashMap<>(3, 1.0f); + map.put(CLASS_NAME.getPreferredName(), classification); + map.put(CLASS_PROBABILITY.getPreferredName(), probability); + map.put(CLASS_SCORE.getPreferredName(), score); return map; } @@ -145,6 +158,7 @@ public Map asValueMap() { public void writeTo(StreamOutput out) throws IOException { out.writeString(classification); out.writeDouble(probability); + out.writeDouble(score); } @Override @@ -152,13 +166,12 @@ public boolean equals(Object object) { if (object == this) { return true; } if (object == null || getClass() != object.getClass()) { return false; } TopClassEntry that = (TopClassEntry) object; - return Objects.equals(classification, that.classification) && - Objects.equals(probability, that.probability); + return Objects.equals(classification, that.classification) && probability == that.probability && score == that.score; } @Override public int hashCode() { - return Objects.hash(classification, probability); + return Objects.hash(classification, probability, score); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceHelpers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceHelpers.java index 86bf076cd6bf1..ae5a4062a69dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceHelpers.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceHelpers.java @@ -6,6 +6,7 @@ package 
org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.inference.results.ClassificationInferenceResults; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -20,17 +21,13 @@ public final class InferenceHelpers { private InferenceHelpers() { } - public static List topClasses(List probabilities, - List classificationLabels, - int numToInclude) { - if (numToInclude == 0) { - return Collections.emptyList(); - } - int[] sortedIndices = IntStream.range(0, probabilities.size()) - .boxed() - .sorted(Comparator.comparing(probabilities::get).reversed()) - .mapToInt(i -> i) - .toArray(); + /** + * @return Tuple of the highest scored index and the top classes + */ + public static Tuple> topClasses(List probabilities, + List classificationLabels, + @Nullable double[] classificationWeights, + int numToInclude) { if (classificationLabels != null && probabilities.size() != classificationLabels.size()) { throw ExceptionsHelper @@ -38,7 +35,24 @@ public static List topClasses(List "model returned classification probabilities of size [{}] which is not equal to classification labels size [{}]", null, probabilities.size(), - classificationLabels); + classificationLabels.size()); + } + + List scores = classificationWeights == null ? + probabilities : + IntStream.range(0, probabilities.size()) + .mapToDouble(i -> probabilities.get(i) * classificationWeights[i]) + .boxed() + .collect(Collectors.toList()); + + int[] sortedIndices = IntStream.range(0, probabilities.size()) + .boxed() + .sorted(Comparator.comparing(scores::get).reversed()) + .mapToInt(i -> i) + .toArray(); + + if (numToInclude == 0) { + return Tuple.tuple(sortedIndices[0], Collections.emptyList()); } List labels = classificationLabels == null ? 
@@ -50,26 +64,24 @@ public static List topClasses(List List topClassEntries = new ArrayList<>(count); for(int i = 0; i < count; i++) { int idx = sortedIndices[i]; - topClassEntries.add(new ClassificationInferenceResults.TopClassEntry(labels.get(idx), probabilities.get(idx))); + topClassEntries.add(new ClassificationInferenceResults.TopClassEntry(labels.get(idx), probabilities.get(idx), scores.get(idx))); } - return topClassEntries; + return Tuple.tuple(sortedIndices[0], topClassEntries); } - public static String classificationLabel(double inferenceValue, @Nullable List classificationLabels) { - assert inferenceValue == Math.rint(inferenceValue); + public static String classificationLabel(Integer inferenceValue, @Nullable List classificationLabels) { if (classificationLabels == null) { return String.valueOf(inferenceValue); } - int label = Double.valueOf(inferenceValue).intValue(); - if (label < 0 || label >= classificationLabels.size()) { + if (inferenceValue < 0 || inferenceValue >= classificationLabels.size()) { throw ExceptionsHelper.serverError( "model returned classification value of [{}] which is not a valid index in classification labels [{}]", null, - label, + inferenceValue, classificationLabels); } - return classificationLabels.get(label); + return classificationLabels.get(inferenceValue); } public static Double toDouble(Object value) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java index e206a70918096..4bbca5ed0b1d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java @@ -6,21 +6,14 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.apache.lucene.util.Accountable; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; -import java.util.List; import java.util.Map; public interface TrainedModel extends NamedXContentObject, NamedWriteable, Accountable { - /** - * @return List of featureNames expected by the model. In the order that they are expected - */ - List getFeatureNames(); - /** * Infer against the provided fields * @@ -36,12 +29,6 @@ public interface TrainedModel extends NamedXContentObject, NamedWriteable, Accou */ TargetType targetType(); - /** - * @return Ordinal encoded list of classification labels. - */ - @Nullable - List classificationLabels(); - /** * Runs validations against the model. 
* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java index c9bde54c460cd..a455730ae8208 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -33,6 +34,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -53,6 +55,7 @@ public class Ensemble implements LenientlyParsedTrainedModel, StrictlyParsedTrai public static final ParseField AGGREGATE_OUTPUT = new ParseField("aggregate_output"); public static final ParseField TARGET_TYPE = new ParseField("target_type"); public static final ParseField CLASSIFICATION_LABELS = new ParseField("classification_labels"); + public static final ParseField CLASSIFICATION_WEIGHTS = new ParseField("classification_weights"); private static final ObjectParser LENIENT_PARSER = createParser(true); private static final ObjectParser STRICT_PARSER = createParser(false); @@ -77,6 +80,7 @@ private static ObjectParser createParser(boolean lenient AGGREGATE_OUTPUT); parser.declareString(Ensemble.Builder::setTargetType, TARGET_TYPE); parser.declareStringArray(Ensemble.Builder::setClassificationLabels, CLASSIFICATION_LABELS); + parser.declareDoubleArray(Ensemble.Builder::setClassificationWeights, CLASSIFICATION_WEIGHTS); return parser; } @@ -93,17 +97,22 @@ public static Ensemble fromXContentLenient(XContentParser parser) { private final OutputAggregator outputAggregator; private final TargetType targetType; private final List classificationLabels; + private final double[] classificationWeights; Ensemble(List featureNames, List models, OutputAggregator outputAggregator, TargetType targetType, - @Nullable List classificationLabels) { + @Nullable List classificationLabels, + @Nullable double[] classificationWeights) { this.featureNames = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(featureNames, FEATURE_NAMES)); this.models = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(models, TRAINED_MODELS)); this.outputAggregator = ExceptionsHelper.requireNonNull(outputAggregator, AGGREGATE_OUTPUT); this.targetType = ExceptionsHelper.requireNonNull(targetType, TARGET_TYPE); this.classificationLabels = classificationLabels == null ? null : Collections.unmodifiableList(classificationLabels); + this.classificationWeights = classificationWeights == null ? 
+ null : + Arrays.copyOf(classificationWeights, classificationWeights.length); } public Ensemble(StreamInput in) throws IOException { @@ -116,11 +125,11 @@ public Ensemble(StreamInput in) throws IOException { } else { this.classificationLabels = null; } - } - - @Override - public List getFeatureNames() { - return featureNames; + if (in.readBoolean()) { + this.classificationWeights = in.readDoubleArray(); + } else { + this.classificationWeights = null; + } } @Override @@ -153,25 +162,22 @@ private InferenceResults buildResults(List processedInferences, Inferenc return new RegressionInferenceResults(outputAggregator.aggregate(processedInferences), config); case CLASSIFICATION: ClassificationConfig classificationConfig = (ClassificationConfig) config; - List topClasses = InferenceHelpers.topClasses( + assert classificationWeights == null || processedInferences.size() == classificationWeights.length; + // Adjust the probabilities according to the thresholds + Tuple> topClasses = InferenceHelpers.topClasses( processedInferences, classificationLabels, + classificationWeights, classificationConfig.getNumTopClasses()); - double value = outputAggregator.aggregate(processedInferences); - return new ClassificationInferenceResults(outputAggregator.aggregate(processedInferences), - classificationLabel(value, classificationLabels), - topClasses, + return new ClassificationInferenceResults((double)topClasses.v1(), + classificationLabel(topClasses.v1(), classificationLabels), + topClasses.v2(), config); default: throw new UnsupportedOperationException("unsupported target_type [" + targetType + "] for inference on ensemble model"); } } - @Override - public List classificationLabels() { - return classificationLabels; - } - @Override public String getWriteableName() { return NAME.getPreferredName(); @@ -187,6 +193,10 @@ public void writeTo(StreamOutput out) throws IOException { if (classificationLabels != null) { out.writeStringCollection(classificationLabels); } + out.writeBoolean(classificationWeights != null); + if (classificationWeights != null) { + out.writeDoubleArray(classificationWeights); + } } @Override @@ -208,6 +218,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (classificationLabels != null) { builder.field(CLASSIFICATION_LABELS.getPreferredName(), classificationLabels); } + if (classificationWeights != null) { + builder.field(CLASSIFICATION_WEIGHTS.getPreferredName(), classificationWeights); + } builder.endObject(); return builder; } @@ -221,12 +234,18 @@ public boolean equals(Object o) { && Objects.equals(models, that.models) && Objects.equals(targetType, that.targetType) && Objects.equals(classificationLabels, that.classificationLabels) - && Objects.equals(outputAggregator, that.outputAggregator); + && Objects.equals(outputAggregator, that.outputAggregator) + && Arrays.equals(classificationWeights, that.classificationWeights); } @Override public int hashCode() { - return Objects.hash(featureNames, models, outputAggregator, targetType, classificationLabels); + return Objects.hash(featureNames, + models, + outputAggregator, + targetType, + classificationLabels, + Arrays.hashCode(classificationWeights)); } @Override @@ -246,9 +265,16 @@ public void validate() { outputAggregator.expectedValueSize(), models.size()); } - if ((this.targetType == TargetType.CLASSIFICATION) != (this.classificationLabels != null)) { + if ((this.classificationLabels != null || this.classificationWeights != null) && (this.targetType != TargetType.CLASSIFICATION)) { throw 
ExceptionsHelper.badRequestException( - "[target_type] should be [classification] if [classification_labels] is provided, and vice versa"); + "[target_type] should be [classification] if [classification_labels] or [classification_weights] are provided"); + } + if (classificationWeights != null && + classificationLabels != null && + classificationWeights.length != classificationLabels.size()) { + throw ExceptionsHelper.badRequestException( + "[classification_weights] and [classification_labels] should be the same length if both are provided" + ); } this.models.forEach(TrainedModel::validate); } @@ -271,6 +297,9 @@ public long ramBytesUsed() { size += RamUsageEstimator.sizeOfCollection(featureNames); size += RamUsageEstimator.sizeOfCollection(classificationLabels); size += RamUsageEstimator.sizeOfCollection(models); + if (classificationWeights != null) { + size += RamUsageEstimator.sizeOf(classificationWeights); + } size += outputAggregator.ramBytesUsed(); return size; } @@ -291,6 +320,7 @@ public static class Builder { private OutputAggregator outputAggregator = new WeightedSum(); private TargetType targetType = TargetType.REGRESSION; private List classificationLabels; + private double[] classificationWeights; private boolean modelsAreOrdered; private Builder (boolean modelsAreOrdered) { @@ -330,6 +360,11 @@ public Builder setClassificationLabels(List classificationLabels) { return this; } + public Builder setClassificationWeights(List classificationWeights) { + this.classificationWeights = classificationWeights.stream().mapToDouble(Double::doubleValue).toArray(); + return this; + } + private void setOutputAggregatorFromParser(List outputAggregators) { if (outputAggregators.size() != 1) { throw ExceptionsHelper.badRequestException("[{}] must have exactly one aggregator defined.", @@ -352,7 +387,7 @@ public Ensemble build() { if (modelsAreOrdered == false && trainedModels != null && trainedModels.size() > 1) { throw ExceptionsHelper.badRequestException("[trained_models] needs to be an array of objects"); } - return new Ensemble(featureNames, trainedModels, outputAggregator, targetType, classificationLabels); + return new Ensemble(featureNames, trainedModels, outputAggregator, targetType, classificationLabels, classificationWeights); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/langident/LangIdentNeuralNetwork.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/langident/LangIdentNeuralNetwork.java index b9edeb7885504..7de2c8f060500 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/langident/LangIdentNeuralNetwork.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/langident/LangIdentNeuralNetwork.java @@ -8,6 +8,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -25,13 +26,10 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; -import java.util.stream.IntStream; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; 
import static org.elasticsearch.xpack.core.ml.inference.utils.Statistics.softMax; @@ -105,11 +103,6 @@ public LangIdentNeuralNetwork(StreamInput in) throws IOException { this.softmaxLayer = new LangNetLayer(in); } - @Override - public List getFeatureNames() { - return Collections.singletonList(embeddedVectorFeatureName); - } - @Override public InferenceResults infer(Map fields, InferenceConfig config) { if (config instanceof ClassificationConfig == false) { @@ -134,20 +127,17 @@ public InferenceResults infer(Map fields, InferenceConfig config List probabilities = softMax(Arrays.stream(scores).boxed().collect(Collectors.toList())); - int maxIndex = IntStream.range(0, probabilities.size()) - .boxed() - .max(Comparator.comparing(probabilities::get)) - .orElseThrow(() -> ExceptionsHelper.serverError("Unexpected null value while searching for max probability")); - - assert maxIndex >= 0 && maxIndex < LANGUAGE_NAMES.size() : "Invalid language predicted. Predicted language index " + maxIndex; ClassificationConfig classificationConfig = (ClassificationConfig) config; - List topClasses = InferenceHelpers.topClasses( + Tuple> topClasses = InferenceHelpers.topClasses( probabilities, LANGUAGE_NAMES, + null, classificationConfig.getNumTopClasses()); - return new ClassificationInferenceResults(maxIndex, - LANGUAGE_NAMES.get(maxIndex), - topClasses, + assert topClasses.v1() >= 0 && topClasses.v1() < LANGUAGE_NAMES.size() : + "Invalid language predicted. Predicted language index " + topClasses.v1(); + return new ClassificationInferenceResults(topClasses.v1(), + LANGUAGE_NAMES.get(topClasses.v1()), + topClasses.v2(), classificationConfig); } @@ -156,11 +146,6 @@ public TargetType targetType() { return TargetType.CLASSIFICATION; } - @Override - public List classificationLabels() { - return LANGUAGE_NAMES; - } - @Override public void validate() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java index 831838e0f7df2..527307597a597 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CachedSupplier; @@ -114,11 +115,6 @@ public String getName() { return NAME.getPreferredName(); } - @Override - public List getFeatureNames() { - return featureNames; - } - public List getNodes() { return nodes; } @@ -152,11 +148,15 @@ private InferenceResults buildResult(Double value, InferenceConfig config) { switch (targetType) { case CLASSIFICATION: ClassificationConfig classificationConfig = (ClassificationConfig) config; - List topClasses = InferenceHelpers.topClasses( + Tuple> topClasses = InferenceHelpers.topClasses( classificationProbability(value), classificationLabels, + null, classificationConfig.getNumTopClasses()); - return new ClassificationInferenceResults(value, classificationLabel(value, classificationLabels), topClasses, config); + return new ClassificationInferenceResults(value, + classificationLabel(topClasses.v1(), 
classificationLabels), + topClasses.v2(), + config); case REGRESSION: return new RegressionInferenceResults(value, config); default: @@ -197,11 +197,6 @@ private List classificationProbability(double inferenceValue) { return list; } - @Override - public List classificationLabels() { - return classificationLabels; - } - @Override public String getWriteableName() { return NAME.getPreferredName(); @@ -270,9 +265,9 @@ public long estimatedNumOperations() { } private void checkTargetType() { - if ((this.targetType == TargetType.CLASSIFICATION) != (this.classificationLabels != null)) { + if (this.classificationLabels != null && this.targetType != TargetType.CLASSIFICATION) { throw ExceptionsHelper.badRequestException( - "[target_type] should be [classification] if [classification_labels] is provided, and vice versa"); + "[target_type] should be [classification] if [classification_labels] are provided"); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 1b80d1963598e..ef0fcd4fdb172 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -95,6 +95,10 @@ public final class Messages { public static final String INFERENCE_TOO_MANY_DEFINITIONS_REQUESTED = "Getting model definition is not supported when getting more than one model"; public static final String INFERENCE_WARNING_ALL_FIELDS_MISSING = "Model [{0}] could not be inferred as all fields were missing"; + public static final String INFERENCE_INVALID_TAGS = "Invalid tags {0}; must only contain lowercase alphanumeric (a-z and 0-9), " + + "hyphens or underscores, must start and end with alphanumeric, and must be less than {1} characters."; + public static final String INFERENCE_TAGS_AND_MODEL_IDS_UNIQUE = "The provided tags {0} must not match existing model_ids."; + public static final String INFERENCE_MODEL_ID_AND_TAGS_UNIQUE = "The provided model_id {0} must not match existing tags."; public static final String JOB_AUDIT_DATAFEED_DATA_SEEN_AGAIN = "Datafeed has started retrieving data again"; public static final String JOB_AUDIT_CREATED = "Job created"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index b9de87ef93de0..b64a12e087ea9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -471,6 +471,9 @@ public static void addDataFrameAnalyticsFields(XContentBuilder builder) throws I .startObject(BoostedTreeParams.FEATURE_BAG_FRACTION.getPreferredName()) .field(TYPE, DOUBLE) .endObject() + .startObject(BoostedTreeParams.NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName()) + .field(TYPE, INTEGER) + .endObject() .startObject(Regression.PREDICTION_FIELD_NAME.getPreferredName()) .field(TYPE, KEYWORD) .endObject() @@ -499,6 +502,9 @@ public static void addDataFrameAnalyticsFields(XContentBuilder builder) throws I .startObject(BoostedTreeParams.FEATURE_BAG_FRACTION.getPreferredName()) .field(TYPE, DOUBLE) .endObject() +
.startObject(BoostedTreeParams.NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName()) + .field(TYPE, INTEGER) + .endObject() .startObject(Classification.PREDICTION_FIELD_NAME.getPreferredName()) .field(TYPE, KEYWORD) .endObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 8eacdcb0e78e4..968df76d5ed91 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -322,6 +322,7 @@ public final class ReservedFieldNames { BoostedTreeParams.ETA.getPreferredName(), BoostedTreeParams.MAXIMUM_NUMBER_TREES.getPreferredName(), BoostedTreeParams.FEATURE_BAG_FRACTION.getPreferredName(), + BoostedTreeParams.NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), ElasticsearchMappings.CONFIG_TYPE, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java index 9f0eb474a59c8..b45d4ad99ccbd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java @@ -7,10 +7,15 @@ import org.apache.lucene.util.SPIClassIterator; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; @@ -28,6 +33,26 @@ */ public interface SecurityExtension { + /** + * This interface provides access to components (clients and services) that may be used + * within custom realms and role providers. + */ + interface SecurityComponents { + /** Global settings for the current node */ + Settings settings(); + /** Provides access to key filesystem paths */ + Environment environment(); + /** An internal client for retrieving information/data from this cluster */ + Client client(); + /** The Elasticsearch thread pools */ + ThreadPool threadPool(); + /** Provides the ability to monitor files for changes */ + ResourceWatcherService resourceWatcherService(); + /** Access to listen to changes in cluster state and settings */ + ClusterService clusterService(); + /** Provides support for mapping users' roles from groups and metadata */ + UserRoleMapper roleMapper(); + } /** * Returns authentication realm implementations added by this extension. * @@ -35,9 +60,9 @@ public interface SecurityExtension { * is a {@link Realm.Factory} which will construct * that realm for use in authentication when that realm type is configured. 
* - * @param resourceWatcherService Use to watch configuration files for changes + * @param components Access to components that may be used to build realms */ - default Map getRealms(ResourceWatcherService resourceWatcherService) { + default Map getRealms(SecurityComponents components) { return Collections.emptyMap(); } @@ -46,8 +71,10 @@ default Map getRealms(ResourceWatcherService resourceWatc * * Only one installed extension may have an authentication failure handler. If more than * one extension returns a non-null handler, an error is raised. + * + * @param components Access to components that may be used to build the handler */ - default AuthenticationFailureHandler getAuthenticationFailureHandler() { + default AuthenticationFailureHandler getAuthenticationFailureHandler(SecurityComponents components) { return null; } @@ -72,11 +99,10 @@ default AuthenticationFailureHandler getAuthenticationFailureHandler() { * * By default, an empty list is returned. * - * @param settings The configured settings for the node - * @param resourceWatcherService Use to watch configuration files for changes + * @param components Access to components that may be used to build roles */ default List, ActionListener>> - getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) { + getRolesProviders(SecurityComponents components) { return Collections.emptyList(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingRealm.java similarity index 93% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingRealm.java index 6089c8f9a70fb..9f561956b9106 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingRealm.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.security.authc.support; +package org.elasticsearch.xpack.core.security.authc.support; import org.elasticsearch.xpack.core.security.authc.Realm; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java similarity index 99% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java index 6ceb9629d6b4f..8a1b14cef483d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UserRoleMapper.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.security.authc.support; +package org.elasticsearch.xpack.core.security.authc.support; import com.unboundid.ldap.sdk.DN; import com.unboundid.ldap.sdk.LDAPException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java index bea4bbb1cc8fa..ec524420cdffe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java @@ -119,17 +119,13 @@ public FieldValue(Object value) { private static CharacterRunAutomaton buildAutomaton(Object value) { if (value instanceof String) { final String str = (String) value; - if (Regex.isSimpleMatchPattern(str) || isLuceneRegex(str)) { + if (Regex.isSimpleMatchPattern(str) || Automatons.isLuceneRegex(str)) { return new CharacterRunAutomaton(Automatons.patterns(str)); } } return null; } - private static boolean isLuceneRegex(String str) { - return str.length() > 1 && str.charAt(0) == '/' && str.charAt(str.length() - 1) == '/'; - } - public Object getValue() { return value; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 6dfc2204c008c..832e9c9365e94 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authz; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -23,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.support.Validation; @@ -532,6 +534,7 @@ private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XCon throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. 
{} requires {} if {} is given", roleName, Fields.FIELD_PERMISSIONS, Fields.GRANT_FIELDS, Fields.EXCEPT_FIELDS); } + checkIfExceptFieldsIsSubsetOfGrantedFields(roleName, grantedFields, deniedFields); return RoleDescriptor.IndicesPrivileges.builder() .indices(names) .privileges(privileges) @@ -542,6 +545,14 @@ private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XCon .build(); } + private static void checkIfExceptFieldsIsSubsetOfGrantedFields(String roleName, String[] grantedFields, String[] deniedFields) { + try { + FieldPermissions.buildPermittedFieldsAutomaton(grantedFields, deniedFields); + } catch (ElasticsearchSecurityException e) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}] - {}", e, roleName, e.getMessage()); + } + } + private static ApplicationResourcePrivileges[] parseApplicationPrivileges(String roleName, XContentParser parser) throws IOException { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java index 6b47c3da2fb58..d99303bafc473 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java @@ -23,13 +23,16 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.util.List; @@ -38,44 +41,102 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * This is a cache for {@link BitSet} instances that are used with the {@link DocumentSubsetReader}. * It is bounded by memory size and access time. * + * DLS uses {@link BitSet} instances to track which documents should be visible to the user ("live") and which should not ("dead"). + * This means that there is a bit for each document in a Lucene index (ES shard). + * Consequently, an index with 10 million document will use more than 1Mb of bitset memory for every unique DLS query, and an index + * with 1 billion documents will use more than 100Mb of memory per DLS query. + * Because DLS supports templating queries based on user metadata, there may be many distinct queries in use for each index, even if + * there is only a single active role. 
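The sizing argument above works out to roughly one bit per document per distinct DLS query, so the figures are easy to sanity-check. The short, self-contained program below reproduces that arithmetic, together with the 10%-of-heap default described in the paragraphs that follow, using an assumed 32GB heap for illustration.

final class DlsBitsetSizing {

    /** A fixed bitset needs roughly one bit per document in the Lucene index (shard). */
    static long bitsetBytes(long docsInShard) {
        return docsInShard / 8;
    }

    public static void main(String[] args) {
        System.out.printf("10M docs -> ~%.2f MB per distinct DLS query%n", bitsetBytes(10_000_000L) / 1e6);
        System.out.printf("1B docs  -> ~%.0f MB per distinct DLS query%n", bitsetBytes(1_000_000_000L) / 1e6);

        // Assumed example: a 32GB heap with the default 10% cache weight.
        long heapBytes = 32L * 1024 * 1024 * 1024;
        long cacheBytes = heapBytes / 10;
        System.out.printf("10%% of a 32GB heap -> ~%.1f GB of cache, ~%d billion document bits%n",
            cacheBytes / (1024.0 * 1024 * 1024), cacheBytes * 8 / 1_000_000_000L);
    }
}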
+ * + * The primary benefit of the cache is to avoid recalculating the "live docs" (visible documents) when a user performs multiple + * consecutive queries across one or more large indices. Given the memory examples above, the cache is only useful if it can hold at + * least 1 large (100Mb or more) {@code BitSet} during a user's active session, and ideally should be capable of supporting multiple + * simultaneous users with distinct DLS queries. + * + * For this reason the default memory usage (weight) for the cache is set to 10% of JVM heap ({@link #CACHE_SIZE_SETTING}), so that it + * automatically scales with the size of the Elasticsearch deployment, and can provide benefit to most use cases without needing + * customisation. On a 32Gb heap, a 10% cache would be 3.2Gb which is large enough to store BitSets representing 25 billion docs. + * + * However, because queries can be templated by user metadata and that metadata can change frequently, it is common for the + * effective lifetime of a single DLS query to be relatively short. We do not want to sacrifice 10% of heap to a cache that is storing + * BitSets that are no longer needed, so we set the TTL on this cache to be 2 hours ({@link #CACHE_TTL_SETTING}). This time has been + * chosen so that it will retain BitSets that are in active use during a user's session, but not be an ongoing drain on memory. + * * @see org.elasticsearch.index.cache.bitset.BitsetFilterCache */ public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListener, Closeable, Accountable { /** - * The TTL defaults to 1 week. We depend on the {@code max_bytes} setting to keep the cache to a sensible size, by evicting LRU - * entries, however there is benefit in reclaiming memory by expiring bitsets that have not be used for some period of time. - * Because {@link org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission.Group#query} can be templated, it is - * not uncommon for a query to only be used for a relatively short period of time (e.g. because a user's metadata changed, or because - * that user is an infrequent user of Elasticsearch). This access time expiry helps free up memory in those circumstances even if the - * cache is never filled. + * The TTL defaults to 2 hours. We default to a large cache size ({@link #CACHE_SIZE_SETTING}), and aggressively + * expire unused entries so that the cache does not hold on to memory unnecessarily. */ static final Setting CACHE_TTL_SETTING = - Setting.timeSetting("xpack.security.dls.bitset.cache.ttl", TimeValue.timeValueHours(24 * 7), Property.NodeScope); + Setting.timeSetting("xpack.security.dls.bitset.cache.ttl", TimeValue.timeValueHours(2), Property.NodeScope); - static final Setting CACHE_SIZE_SETTING = Setting.byteSizeSetting("xpack.security.dls.bitset.cache.size", - new ByteSizeValue(50, ByteSizeUnit.MB), Property.NodeScope); + /** + * The size defaults to 10% of heap so that it automatically scales up with larger node size + */ + static final Setting CACHE_SIZE_SETTING = Setting.memorySizeSetting("xpack.security.dls.bitset.cache.size", + "10%", Property.NodeScope); private static final BitSet NULL_MARKER = new FixedBitSet(0); private final Logger logger; + + /** + * When a {@link BitSet} is evicted from {@link #bitsetCache}, we need to also remove it from {@link #keysByIndex}.
+ * We use a {@link ReentrantReadWriteLock} to control atomicity here - the "read" side represents potential insertions to the + * {@link #bitsetCache}, the "write" side represents removals from {@link #keysByIndex}. + * The risk (that {@link Cache} does not provide protection for) is that an entry is removed from the cache, and then immediately + * re-populated, before we process the removal event. To protect against that we need to check the state of the {@link #bitsetCache} + * but we need exclusive ("write") access while performing that check and updating the values in {@link #keysByIndex}. + */ + private final ReleasableLock cacheEvictionLock; + private final ReleasableLock cacheModificationLock; + private final ExecutorService cleanupExecutor; + + private final long maxWeightBytes; private final Cache bitsetCache; private final Map> keysByIndex; + private final AtomicLong cacheFullWarningTime; - public DocumentSubsetBitsetCache(Settings settings) { + public DocumentSubsetBitsetCache(Settings settings, ThreadPool threadPool) { + this(settings, threadPool.executor(ThreadPool.Names.GENERIC)); + } + + /** + * @param settings The global settings object for this node + * @param cleanupExecutor An executor on which the cache cleanup tasks can be run. Due to the way the cache is structured internally, + * it is sometimes necessary to run an asynchronous task to synchronize the internal state. + */ + protected DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor) { this.logger = LogManager.getLogger(getClass()); + + final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + this.cacheEvictionLock = new ReleasableLock(readWriteLock.writeLock()); + this.cacheModificationLock = new ReleasableLock(readWriteLock.readLock()); + this.cleanupExecutor = cleanupExecutor; + final TimeValue ttl = CACHE_TTL_SETTING.get(settings); - final ByteSizeValue size = CACHE_SIZE_SETTING.get(settings); + this.maxWeightBytes = CACHE_SIZE_SETTING.get(settings).getBytes(); this.bitsetCache = CacheBuilder.builder() .setExpireAfterAccess(ttl) - .setMaximumWeight(size.getBytes()) - .weigher((key, bitSet) -> bitSet == NULL_MARKER ? 0 : bitSet.ramBytesUsed()).build(); + .setMaximumWeight(maxWeightBytes) + .weigher((key, bitSet) -> bitSet == NULL_MARKER ? 0 : bitSet.ramBytesUsed()) + .removalListener(this::onCacheEviction) + .build(); + this.keysByIndex = new ConcurrentHashMap<>(); + this.cacheFullWarningTime = new AtomicLong(0); } @Override @@ -88,6 +149,31 @@ public void onClose(IndexReader.CacheKey ownerCoreCacheKey) { } } + /** + * Cleanup (synchronize) the internal state when an object is removed from the primary cache + */ + private void onCacheEviction(RemovalNotification notification) { + final BitsetCacheKey bitsetKey = notification.getKey(); + final IndexReader.CacheKey indexKey = bitsetKey.index; + if (keysByIndex.getOrDefault(indexKey, Set.of()).contains(bitsetKey) == false) { + // If the bitsetKey isn't in the lookup map, then there's nothing to synchronize + return; + } + // We push this to a background thread, so that it reduces the risk of blocking searches, but also so that the lock management is + // simpler - this callback is likely to take place on a thread that is actively adding something to the cache, and is therefore + // holding the read ("update") side of the lock. It is not possible to upgrade a read lock to a write ("eviction") lock, but we + // need to acquire that lock here. 
+ cleanupExecutor.submit(() -> { + try (ReleasableLock ignored = cacheEvictionLock.acquire()) { + // it's possible for the key to be back in the cache if it was immediately repopulated after it was evicted, so check + if (bitsetCache.get(bitsetKey) == null) { + // key is no longer in the cache, make sure it is no longer in the lookup map either. + keysByIndex.getOrDefault(indexKey, Set.of()).remove(bitsetKey); + } + } + }); + } + @Override public void close() { clear("close"); @@ -96,7 +182,8 @@ public void close() { public void clear(String reason) { logger.debug("clearing all DLS bitsets because [{}]", reason); // Due to the order here, it is possible than a new entry could be added _after_ the keysByIndex map is cleared - // but _before_ the cache is cleared. This would mean it sits orphaned in keysByIndex, but this is not a issue. + // but _before_ the cache is cleared. This should get fixed up in the "onCacheEviction" callback, but if anything slips through + // and sits orphaned in keysByIndex, it will not be a significant issue. // When the index is closed, the key will be removed from the map, and there will not be a corresponding item // in the cache, which will make the cache-invalidate a no-op. // Since the entry is not in the cache, if #getBitSet is called, it will be loaded, and the new key will be added @@ -130,31 +217,57 @@ public BitSet getBitSet(final Query query, final LeafReaderContext context) thro final IndexReader.CacheKey indexKey = coreCacheHelper.getKey(); final BitsetCacheKey cacheKey = new BitsetCacheKey(indexKey, query); - final BitSet bitSet = bitsetCache.computeIfAbsent(cacheKey, ignore1 -> { - // This ensures all insertions into the set are guarded by ConcurrentHashMap's atomicity guarantees. - keysByIndex.compute(indexKey, (ignore2, set) -> { - if (set == null) { - set = Sets.newConcurrentHashSet(); + try (ReleasableLock ignored = cacheModificationLock.acquire()) { + final BitSet bitSet = bitsetCache.computeIfAbsent(cacheKey, ignore1 -> { + // This ensures all insertions into the set are guarded by ConcurrentHashMap's atomicity guarantees. + keysByIndex.compute(indexKey, (ignore2, set) -> { + if (set == null) { + set = Sets.newConcurrentHashSet(); + } + set.add(cacheKey); + return set; + }); + final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); + final IndexSearcher searcher = new IndexSearcher(topLevelContext); + searcher.setQueryCache(null); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + Scorer s = weight.scorer(context); + if (s == null) { + // A cache loader is not allowed to return null, return a marker object instead. 
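The interplay of the read ("modification") lock, the write ("eviction") lock, and the background cleanup task can be restated with plain JDK types. The class below is a stand-in sketch rather than the real Cache/ReleasableLock machinery: inserts hold the read lock, and the deferred cleanup takes the write lock and re-checks the cache before touching the lookup map.

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class TwoMapCache<K, V> {

    private final Map<K, V> cache = new ConcurrentHashMap<>();                   // stand-in for the bounded Cache
    private final Map<String, Set<K>> keysByGroup = new ConcurrentHashMap<>();   // stand-in for keysByIndex
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final ExecutorService cleanupExecutor;

    TwoMapCache(ExecutorService cleanupExecutor) {
        this.cleanupExecutor = cleanupExecutor;
    }

    V computeIfAbsent(String group, K key, V value) {
        lock.readLock().lock(); // "modification" side: many threads may insert concurrently
        try {
            keysByGroup.computeIfAbsent(group, g -> ConcurrentHashMap.newKeySet()).add(key);
            return cache.computeIfAbsent(key, k -> value);
        } finally {
            lock.readLock().unlock();
        }
    }

    /** Invoked by the cache's removal listener; cleanup is deferred so the evicting thread never needs the write lock. */
    void onEviction(String group, K key) {
        cleanupExecutor.submit(() -> {
            lock.writeLock().lock(); // "eviction" side: exclusive while the two structures are reconciled
            try {
                Set<K> keys = keysByGroup.get(group);
                if (keys != null && cache.containsKey(key) == false) {
                    keys.remove(key); // only drop the lookup entry if the key was not immediately re-populated
                }
            } finally {
                lock.writeLock().unlock();
            }
        });
    }
}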
+ return NULL_MARKER; + } else { + final BitSet bs = BitSet.of(s.iterator(), context.reader().maxDoc()); + final long bitSetBytes = bs.ramBytesUsed(); + if (bitSetBytes > this.maxWeightBytes) { + logger.warn("built a DLS BitSet that uses [{}] bytes; the DLS BitSet cache has a maximum size of [{}] bytes;" + + " this object cannot be cached and will need to be rebuilt for each use;" + + " consider increasing the value of [{}]", + bitSetBytes, maxWeightBytes, CACHE_SIZE_SETTING.getKey()); + } else if (bitSetBytes + bitsetCache.weight() > maxWeightBytes) { + maybeLogCacheFullWarning(); + } + return bs; } - set.add(cacheKey); - return set; }); - final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); - final IndexSearcher searcher = new IndexSearcher(topLevelContext); - searcher.setQueryCache(null); - final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); - Scorer s = weight.scorer(context); - if (s == null) { - // A cache loader is not allowed to return null, return a marker object instead. - return NULL_MARKER; + if (bitSet == NULL_MARKER) { + return null; } else { - return BitSet.of(s.iterator(), context.reader().maxDoc()); + return bitSet; } - }); - if (bitSet == NULL_MARKER) { - return null; - } else { - return bitSet; + } + } + + private void maybeLogCacheFullWarning() { + final long nextLogTime = cacheFullWarningTime.get(); + final long now = System.currentTimeMillis(); + if (nextLogTime > now) { + return; + } + final long nextCheck = now + TimeUnit.MINUTES.toMillis(30); + if (cacheFullWarningTime.compareAndSet(nextLogTime, nextCheck)) { + logger.info( + "the Document Level Security BitSet cache is full which may impact performance; consider increasing the value of [{}]", + CACHE_SIZE_SETTING.getKey()); } } @@ -203,4 +316,27 @@ public String toString() { return getClass().getSimpleName() + "(" + index + "," + query + ")"; } } + + /** + * This method verifies that the two internal data structures ({@link #bitsetCache} and {@link #keysByIndex}) are consistent with one + * another. This method is only called by tests. 
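maybeLogCacheFullWarning relies on a single AtomicLong and compareAndSet so that, however many threads observe a full cache, the message is emitted at most once per 30-minute window. The same rate-limiting idiom, extracted into a standalone helper with an injectable clock:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

final class ThrottledWarning {
    private final AtomicLong nextAllowedTime = new AtomicLong(0);
    private final long intervalMillis;
    private final LongSupplier clock;

    ThrottledWarning(long interval, TimeUnit unit, LongSupplier clock) {
        this.intervalMillis = unit.toMillis(interval);
        this.clock = clock;
    }

    /** Returns true for at most one caller per interval; all other callers are suppressed. */
    boolean shouldLog() {
        final long previous = nextAllowedTime.get();
        final long now = clock.getAsLong();
        if (previous > now) {
            return false; // still inside the quiet period
        }
        // Only the thread that wins the CAS gets to log; losers saw a stale value and stay quiet.
        return nextAllowedTime.compareAndSet(previous, now + intervalMillis);
    }

    public static void main(String[] args) {
        ThrottledWarning warning = new ThrottledWarning(30, TimeUnit.MINUTES, System::currentTimeMillis);
        System.out.println(warning.shouldLog()); // true  - first caller logs
        System.out.println(warning.shouldLog()); // false - suppressed for the next 30 minutes
    }
}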
+ */ + void verifyInternalConsistency() { + this.bitsetCache.keys().forEach(bck -> { + final Set set = this.keysByIndex.get(bck.index); + if (set == null) { + throw new IllegalStateException("Key [" + bck + "] is in the cache, but there is no entry for [" + bck.index + + "] in the lookup map"); + } + if (set.contains(bck) == false) { + throw new IllegalStateException("Key [" + bck + "] is in the cache, but the lookup entry for [" + bck.index + + "] does not contain that key"); + } + }); + this.keysByIndex.values().stream().flatMap(Set::stream).forEach(bck -> { + if (this.bitsetCache.get(bck) == null) { + throw new IllegalStateException("Key [" + bck + "] is in the lookup map, but is not in the cache"); + } + }); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index f58367dc43886..f745629b60199 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -119,12 +119,16 @@ public static Automaton initializePermittedFieldsAutomaton(FieldPermissionsDefin assert groups.size() > 0 : "there must always be a single group for field inclusion/exclusion"; List automatonList = groups.stream() - .map(g -> FieldPermissions.initializePermittedFieldsAutomaton(g.getGrantedFields(), g.getExcludedFields())) + .map(g -> FieldPermissions.buildPermittedFieldsAutomaton(g.getGrantedFields(), g.getExcludedFields())) .collect(Collectors.toList()); return Automatons.unionAndMinimize(automatonList); } - private static Automaton initializePermittedFieldsAutomaton(final String[] grantedFields, final String[] deniedFields) { + /** + * Construct a single automaton to represent the set of {@code grantedFields} except for the {@code deniedFields}. + * @throws ElasticsearchSecurityException If {@code deniedFields} is not a subset of {@code grantedFields}. 
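buildPermittedFieldsAutomaton now backs both role parsing (checkIfExceptFieldsIsSubsetOfGrantedFields above) and field-permission construction itself, rejecting any except list that is not covered by the granted fields. A simplified, set-based sketch of that validation is below; the real check builds an automaton and understands wildcards, and the exception type here is a stand-in.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class FieldPermissionCheck {

    /** Simplified stand-in: literal field names only, no wildcard or automaton support. */
    static void validate(String roleName, String[] grantedFields, String[] deniedFields) {
        if (grantedFields == null || deniedFields == null) {
            return; // null granted fields means "grant everything", so any except list is acceptable
        }
        Set<String> granted = new HashSet<>(Arrays.asList(grantedFields));
        for (String denied : deniedFields) {
            if (granted.contains(denied) == false) {
                // Mirrors wrapping the security exception into a parse exception that names the role
                throw new IllegalArgumentException("failed to parse indices privileges for role [" + roleName
                    + "] - except field [" + denied + "] is not in the granted fields");
            }
        }
    }

    public static void main(String[] args) {
        validate("ok_role", new String[] {"public", "public.name"}, new String[] {"public.name"});
        try {
            validate("bad_role", new String[] {"public"}, new String[] {"secret"});
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}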
+ */ + public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFields, final String[] deniedFields) { Automaton grantedFieldsAutomaton; if (grantedFields == null || Arrays.stream(grantedFields).anyMatch(Regex::isMatchAllPattern)) { grantedFieldsAutomaton = Automatons.MATCH_ALL; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 2e51e07bc7676..b68b9dd79204a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; @@ -139,7 +140,7 @@ public ResourcePrivilegesMap checkResourcePrivileges(Set checkForIndexPa final Map predicateCache = new HashMap<>(); for (String forIndexPattern : checkForIndexPatterns) { Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); - if (false == allowRestrictedIndices && false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(forIndexPattern)) { + if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) { checkIndexAutomaton = Automatons.minusAndMinimize(checkIndexAutomaton, RestrictedIndicesNames.NAMES_AUTOMATON); } if (false == Operations.isEmpty(checkIndexAutomaton)) { @@ -268,6 +269,13 @@ public Map authorize(String act return unmodifiableMap(indexPermissions); } + private boolean isConcreteRestrictedIndex(String indexPattern) { + if (Regex.isSimpleMatchPattern(indexPattern) || Automatons.isLuceneRegex(indexPattern)) { + return false; + } + return RestrictedIndicesNames.isRestricted(indexPattern); + } + public static class Group { private final IndexPrivilege privilege; private final Predicate actionMatcher; @@ -316,7 +324,7 @@ private boolean check(String action) { private boolean check(String action, String index) { assert index != null; return check(action) && indexNameMatcher.test(index) - && (allowRestrictedIndices || (false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index))); + && (allowRestrictedIndices || (false == RestrictedIndicesNames.isRestricted(index))); } boolean hasQuery() { @@ -351,13 +359,13 @@ private static Predicate buildIndexMatcherPredicateForAction(String acti final Predicate predicate; if (restrictedIndices.isEmpty()) { predicate = indexMatcher(ordinaryIndices) - .and(index -> false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index)); + .and(index -> false == RestrictedIndicesNames.isRestricted(index)); } else if (ordinaryIndices.isEmpty()) { predicate = indexMatcher(restrictedIndices); } else { predicate = indexMatcher(restrictedIndices) .or(indexMatcher(ordinaryIndices) - .and(index -> false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index))); + .and(index -> false == RestrictedIndicesNames.isRestricted(index))); } return predicate; } diff --git 
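isConcreteRestrictedIndex relies on the same convention that Automatons.isLuceneRegex codifies in a later hunk: a value wrapped in forward slashes is a Lucene regular expression, a value containing '*' is a simple wildcard, and only anything else can name a restricted index exactly. A standalone restatement of that classification, where the wildcard test is a simplified stand-in for Regex.isSimpleMatchPattern:

final class PatternKind {

    enum Kind { LUCENE_REGEX, WILDCARD, CONCRETE_NAME }

    /** Mirrors Automatons.isLuceneRegex: at least two chars, wrapped in forward slashes. */
    static boolean isLuceneRegex(String str) {
        return str.length() > 1 && str.charAt(0) == '/' && str.charAt(str.length() - 1) == '/';
    }

    static Kind classify(String value) {
        if (isLuceneRegex(value)) {
            return Kind.LUCENE_REGEX;
        }
        if (value.indexOf('*') != -1) {  // simplified stand-in for Regex.isSimpleMatchPattern
            return Kind.WILDCARD;
        }
        return Kind.CONCRETE_NAME;       // only concrete names can match a restricted index exactly
    }

    public static void main(String[] args) {
        System.out.println(classify("/running-.*/"));   // LUCENE_REGEX
        System.out.println(classify("logs-*"));         // WILDCARD
        System.out.println(classify(".security-7"));    // CONCRETE_NAME
    }
}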
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index d9db50678c160..7899797b890d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -9,6 +9,8 @@ import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -50,11 +52,9 @@ private static Map initializeReservedRoles() { .put("superuser", SUPERUSER_ROLE_DESCRIPTOR) .put("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) - .put("kibana_user", new RoleDescriptor("kibana_user", null, null, new RoleDescriptor.ApplicationResourcePrivileges[] { - RoleDescriptor.ApplicationResourcePrivileges.builder() - .application("kibana-.kibana").resources("*").privileges("all").build() }, - null, null, - MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + .put("kibana_admin", kibanaAdminUser("kibana_admin", MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("kibana_user", kibanaAdminUser("kibana_user", + MetadataUtils.getDeprecatedReservedMetadata("Please use the [kibana_admin] role instead"))) .put("monitoring_user", new RoleDescriptor("monitoring_user", new String[] { "cluster:monitor/main", "cluster:monitor/xpack/info", RemoteInfoAction.NAME }, new RoleDescriptor.IndicesPrivileges[] { @@ -108,12 +108,12 @@ private static Map initializeReservedRoles() { RoleDescriptor.ApplicationResourcePrivileges.builder() .application("kibana-.kibana").resources("*").privileges("read").build() }, null, null, - MetadataUtils.DEFAULT_RESERVED_METADATA, + MetadataUtils.getDeprecatedReservedMetadata("Please use Kibana feature privileges instead"), null)) .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, new String[] { "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token", "manage_oidc", - GetBuiltinPrivilegesAction.NAME, "delegate_pki" + GetBuiltinPrivilegesAction.NAME, "delegate_pki", GetLifecycleAction.NAME, PutLifecycleAction.NAME }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() @@ -264,6 +264,16 @@ private static Map initializeReservedRoles() { .immutableMap(); } + private static RoleDescriptor kibanaAdminUser(String name, Map metadata) { + return new RoleDescriptor(name, null, null, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-.kibana") + .resources("*").privileges("all") + .build() }, + null, null, metadata, null); + } + public static boolean isReserved(String role) { return RESERVED_ROLES.containsKey(role) || UsernamesField.SYSTEM_ROLE.equals(role) || UsernamesField.XPACK_ROLE.equals(role); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java index 80c17c484739c..77f6c537b6f81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.security.support.Automatons; +import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -21,10 +22,20 @@ public final class RestrictedIndicesNames { public static final String INTERNAL_SECURITY_TOKENS_INDEX_7 = ".security-tokens-7"; public static final String SECURITY_TOKENS_ALIAS = ".security-tokens"; + // public for tests + public static final String ASYNC_SEARCH_PREFIX = ".async-search-"; + private static final Automaton ASYNC_SEARCH_AUTOMATON = Automatons.patterns(ASYNC_SEARCH_PREFIX + "*"); + + // public for tests public static final Set RESTRICTED_NAMES = Collections.unmodifiableSet(Sets.newHashSet(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_6, INTERNAL_SECURITY_MAIN_INDEX_7, INTERNAL_SECURITY_TOKENS_INDEX_7, SECURITY_TOKENS_ALIAS)); - public static final Automaton NAMES_AUTOMATON = Automatons.patterns(RESTRICTED_NAMES); + public static boolean isRestricted(String concreteIndexName) { + return RESTRICTED_NAMES.contains(concreteIndexName) || concreteIndexName.startsWith(ASYNC_SEARCH_PREFIX); + } + + public static final Automaton NAMES_AUTOMATON = Automatons.unionAndMinimize(Arrays.asList(Automatons.patterns(RESTRICTED_NAMES), + ASYNC_SEARCH_AUTOMATON)); private RestrictedIndicesNames() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java index 7e6fd7ca46283..c0e4af238b1bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -107,6 +107,13 @@ static Automaton pattern(String pattern) { } } + /** + * Is the str a lucene type of pattern + */ + public static boolean isLuceneRegex(String str) { + return str.length() > 1 && str.charAt(0) == '/' && str.charAt(str.length() - 1) == '/'; + } + private static Automaton buildAutomaton(String pattern) { if (pattern.startsWith("/")) { // it's a lucene regexp if (pattern.length() == 1 || !pattern.endsWith("/")) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java index 72388a8fdbcb5..e85417b1d94f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java @@ -11,6 +11,8 @@ public class MetadataUtils { public static final String RESERVED_PREFIX = "_"; public static final String RESERVED_METADATA_KEY = RESERVED_PREFIX + "reserved"; + public static final String DEPRECATED_METADATA_KEY = RESERVED_PREFIX + "deprecated"; + public static final String DEPRECATED_REASON_METADATA_KEY = RESERVED_PREFIX + "deprecated_reason"; public static final 
Map DEFAULT_RESERVED_METADATA = Map.of(RESERVED_METADATA_KEY, true); private MetadataUtils() { @@ -24,4 +26,12 @@ public static boolean containsReservedMetadata(Map metadata) { } return false; } + + public static Map getDeprecatedReservedMetadata(String reason) { + return Map.of( + RESERVED_METADATA_KEY, true, + DEPRECATED_METADATA_KEY, true, + DEPRECATED_REASON_METADATA_KEY, reason + ); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java index 30b75aadf5fbb..bba67d6809abe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java @@ -120,8 +120,20 @@ public static List readCertificates(InputStream input) throws Certi * return the password for that key. If it returns {@code null}, then the key-pair for that alias is not read. */ public static Map readPkcs12KeyPairs(Path path, char[] password, Function keyPassword) - throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException, UnrecoverableKeyException { - final KeyStore store = readKeyStore(path, "PKCS12", password); + throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException, UnrecoverableKeyException { + return readKeyPairsFromKeystore(path, "PKCS12", password, keyPassword); + } + + public static Map readKeyPairsFromKeystore(Path path, String storeType, char[] password, + Function keyPassword) + throws IOException, KeyStoreException, CertificateException, NoSuchAlgorithmException, UnrecoverableKeyException { + + final KeyStore store = readKeyStore(path, storeType, password); + return readKeyPairsFromKeystore(store, keyPassword); + } + + static Map readKeyPairsFromKeystore(KeyStore store, Function keyPassword) + throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException { final Enumeration enumeration = store.aliases(); final Map map = new HashMap<>(store.size()); while (enumeration.hasMoreElements()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java new file mode 100644 index 0000000000000..cd779d09d52ac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
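readKeyPairsFromKeystore generalises the PKCS#12-only entry point by iterating the aliases of any loaded KeyStore and collecting key/certificate pairs for the aliases the caller supplies a password for. A self-contained, JDK-only sketch of that loop; the path, store type and passwords in main are placeholders for illustration:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.Key;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class KeystoreKeyPairs {

    /** Walk every alias in the store; a null key password means "skip this alias". */
    static Map<Certificate, Key> readKeyPairs(Path path, String type, char[] storePassword,
                                              Function<String, char[]> keyPassword) throws Exception {
        KeyStore store = KeyStore.getInstance(type);
        try (InputStream in = Files.newInputStream(path)) {
            store.load(in, storePassword);
        }
        Map<Certificate, Key> pairs = new HashMap<>();
        Enumeration<String> aliases = store.aliases();
        while (aliases.hasMoreElements()) {
            String alias = aliases.nextElement();
            if (store.isKeyEntry(alias) == false) {
                continue;
            }
            char[] password = keyPassword.apply(alias);
            if (password == null) {
                continue; // the caller chose not to read this key pair
            }
            pairs.put(store.getCertificate(alias), store.getKey(alias, password));
        }
        return pairs;
    }

    public static void main(String[] args) throws Exception {
        // Placeholder path and passwords, for illustration only.
        Map<Certificate, Key> pairs = readKeyPairs(Path.of("config/http.p12"), "PKCS12",
            "changeme".toCharArray(), alias -> "changeme".toCharArray());
        System.out.println("read " + pairs.size() + " key pairs");
    }
}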
+ */ + +package org.elasticsearch.xpack.oss; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; + +public class IndexFeatureSetUsage extends XPackFeatureSet.Usage { + + private static Set sort(Set set) { + return Collections.unmodifiableSet(new TreeSet<>(set)); + } + + private final Set usedFieldTypes; + private final Set usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers; + private final Set usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers; + + public IndexFeatureSetUsage(Set usedFieldTypes, + Set usedCharFilters, Set usedTokenizers, Set usedTokenFilters, Set usedAnalyzers, + Set usedBuiltInCharFilters, Set usedBuiltInTokenizers, Set usedBuiltInTokenFilters, + Set usedBuiltInAnalyzers) { + super(XPackField.INDEX, true, true); + this.usedFieldTypes = sort(usedFieldTypes); + this.usedCharFilters = sort(usedCharFilters); + this.usedTokenizers = sort(usedTokenizers); + this.usedTokenFilters = sort(usedTokenFilters); + this.usedAnalyzers = sort(usedAnalyzers); + this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters); + this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers); + this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters); + this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers); + } + + public IndexFeatureSetUsage(StreamInput input) throws IOException { + super(input); + usedFieldTypes = input.readSet(StreamInput::readString); + usedCharFilters = input.readSet(StreamInput::readString); + usedTokenizers = input.readSet(StreamInput::readString); + usedTokenFilters = input.readSet(StreamInput::readString); + usedAnalyzers = input.readSet(StreamInput::readString); + usedBuiltInCharFilters = input.readSet(StreamInput::readString); + usedBuiltInTokenizers = input.readSet(StreamInput::readString); + usedBuiltInTokenFilters = input.readSet(StreamInput::readString); + usedBuiltInAnalyzers = input.readSet(StreamInput::readString); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(usedFieldTypes, StreamOutput::writeString); + out.writeCollection(usedCharFilters, StreamOutput::writeString); + out.writeCollection(usedTokenizers, StreamOutput::writeString); + out.writeCollection(usedTokenFilters, StreamOutput::writeString); + out.writeCollection(usedAnalyzers, StreamOutput::writeString); + out.writeCollection(usedBuiltInCharFilters, StreamOutput::writeString); + out.writeCollection(usedBuiltInTokenizers, StreamOutput::writeString); + out.writeCollection(usedBuiltInTokenFilters, StreamOutput::writeString); + out.writeCollection(usedBuiltInAnalyzers, StreamOutput::writeString); + } + + /** + * Return the set of used field types in the cluster. + */ + public Set getUsedFieldTypes() { + return usedFieldTypes; + } + + /** + * Return the set of used char filters in the cluster. + */ + public Set getUsedCharFilterTypes() { + return usedCharFilters; + } + + /** + * Return the set of used tokenizers in the cluster. + */ + public Set getUsedTokenizerTypes() { + return usedTokenizers; + } + + /** + * Return the set of used token filters in the cluster. 
+ */ + public Set getUsedTokenFilterTypes() { + return usedTokenFilters; + } + + /** + * Return the set of used analyzers in the cluster. + */ + public Set getUsedAnalyzerTypes() { + return usedAnalyzers; + } + + /** + * Return the set of used built-in char filters in the cluster. + */ + public Set getUsedBuiltInCharFilters() { + return usedBuiltInCharFilters; + } + + /** + * Return the set of used built-in tokenizers in the cluster. + */ + public Set getUsedBuiltInTokenizers() { + return usedBuiltInTokenizers; + } + + /** + * Return the set of used built-in token filters in the cluster. + */ + public Set getUsedBuiltInTokenFilters() { + return usedBuiltInTokenFilters; + } + + /** + * Return the set of used built-in analyzers in the cluster. + */ + public Set getUsedBuiltInAnalyzers() { + return usedBuiltInAnalyzers; + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + + builder.startObject("analysis"); + { + builder.field("char_filter_types", usedCharFilters); + builder.field("tokenizer_types", usedTokenizers); + builder.field("filter_types", usedTokenFilters); + builder.field("analyzer_types", usedAnalyzers); + + builder.field("built_in_char_filters", usedBuiltInCharFilters); + builder.field("built_in_tokenizers", usedBuiltInTokenizers); + builder.field("built_in_filters", usedBuiltInTokenFilters); + builder.field("built_in_analyzers", usedBuiltInAnalyzers); + } + builder.endObject(); + + builder.startObject("mappings"); + { + builder.field("field_types", usedFieldTypes); + } + builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndexFeatureSetUsage that = (IndexFeatureSetUsage) o; + return available == that.available && enabled == that.enabled && + Objects.equals(usedFieldTypes, that.usedFieldTypes) && + Objects.equals(usedCharFilters, that.usedCharFilters) && + Objects.equals(usedTokenizers, that.usedTokenizers) && + Objects.equals(usedTokenFilters, that.usedTokenFilters) && + Objects.equals(usedAnalyzers, that.usedAnalyzers) && + Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) && + Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) && + Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) && + Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers); + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled, usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters, + usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, + usedBuiltInAnalyzers); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java new file mode 100644 index 0000000000000..dd0e002c93e84 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
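The IndexFeatureSetUsage constructor reads every set with readSet in exactly the order writeTo writes them with writeCollection, the usual wire-format contract that read order must mirror write order. A minimal, JDK-only illustration of that symmetry with plain data streams:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.TreeSet;

final class SetWireFormat {

    static void writeStringSet(DataOutputStream out, Set<String> values) throws IOException {
        out.writeInt(values.size());
        for (String value : values) {
            out.writeUTF(value);
        }
    }

    static Set<String> readStringSet(DataInputStream in) throws IOException {
        int size = in.readInt();
        Set<String> values = new LinkedHashSet<>();
        for (int i = 0; i < size; i++) {
            values.add(in.readUTF());
        }
        return values;
    }

    public static void main(String[] args) throws IOException {
        Set<String> fieldTypes = new TreeSet<>(Set.of("keyword", "text", "integer"));

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            writeStringSet(out, fieldTypes);   // write order...
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(readStringSet(in)); // ...must match read order: [integer, keyword, text]
        }
    }
}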
+ */ + +package org.elasticsearch.xpack.oss; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; + +public class IndexUsageTransportAction extends XPackUsageFeatureTransportAction { + + @Inject + public IndexUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(XPackUsageFeatureAction.INDEX.name(), transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver); + } + + @Override + protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, + ActionListener listener) { + + final Set usedFieldTypes = new HashSet<>(); + final Set usedCharFilters = new HashSet<>(); + final Set usedTokenizers = new HashSet<>(); + final Set usedTokenFilters = new HashSet<>(); + final Set usedAnalyzers = new HashSet<>(); + final Set usedBuiltInCharFilters = new HashSet<>(); + final Set usedBuiltInTokenizers = new HashSet<>(); + final Set usedBuiltInTokenFilters = new HashSet<>(); + final Set usedBuiltInAnalyzers = new HashSet<>(); + + for (IndexMetaData indexMetaData : state.metaData()) { + MappingMetaData mappingMetaData = indexMetaData.mapping(); + if (mappingMetaData != null) { + visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> { + Object type = fieldMapping.get("type"); + if (type != null) { + usedFieldTypes.add(type.toString()); + } else if (fieldMapping.containsKey("properties")) { + usedFieldTypes.add("object"); + } + + for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) { + Object analyzer = fieldMapping.get(key); + if (analyzer != null) { + usedBuiltInAnalyzers.add(analyzer.toString()); + } + } + }); + } + + Settings indexSettings = indexMetaData.getSettings(); + + Map analyzerSettings = indexSettings.getGroups("index.analysis.analyzer"); + usedBuiltInAnalyzers.removeAll(analyzerSettings.keySet()); + for (Settings analyzerSetting : analyzerSettings.values()) { + usedAnalyzers.add(analyzerSetting.get("type", "custom")); + usedBuiltInCharFilters.addAll(analyzerSetting.getAsList("char_filter")); + String tokenizer = analyzerSetting.get("tokenizer"); + if (tokenizer != null) { + usedBuiltInTokenizers.add(tokenizer); + } + usedBuiltInTokenFilters.addAll(analyzerSetting.getAsList("filter")); + } + + Map charFilterSettings = indexSettings.getGroups("index.analysis.char_filter"); + usedBuiltInCharFilters.removeAll(charFilterSettings.keySet()); + 
aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilters); + + Map tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer"); + usedBuiltInTokenizers.removeAll(tokenizerSettings.keySet()); + aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizers); + + Map tokenFilterSettings = indexSettings.getGroups("index.analysis.filter"); + usedBuiltInTokenFilters.removeAll(tokenFilterSettings.keySet()); + aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilters); + } + + listener.onResponse(new XPackUsageFeatureResponse( + new IndexFeatureSetUsage(usedFieldTypes, + usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers, + usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers))); + } + + static void visitMapping(Map mapping, Consumer> fieldMappingConsumer) { + Object properties = mapping.get("properties"); + if (properties != null && properties instanceof Map) { + @SuppressWarnings("unchecked") + Map propertiesAsMap = (Map) properties; + for (Object v : propertiesAsMap.values()) { + if (v != null && v instanceof Map) { + + @SuppressWarnings("unchecked") + Map fieldMapping = (Map) v; + fieldMappingConsumer.accept(fieldMapping); + visitMapping(fieldMapping, fieldMappingConsumer); + + // Multi fields + Object fieldsO = fieldMapping.get("fields"); + if (fieldsO != null && fieldsO instanceof Map) { + @SuppressWarnings("unchecked") + Map fields = (Map) fieldsO; + for (Object v2 : fields.values()) { + if (v2 instanceof Map) { + @SuppressWarnings("unchecked") + Map fieldMapping2 = (Map) v2; + fieldMappingConsumer.accept(fieldMapping2); + } + } + } + } + } + } + } + + static void aggregateAnalysisTypes(Collection analysisComponents, Set usedTypes) { + for (Settings settings : analysisComponents) { + String type = settings.get("type"); + if (type != null) { + usedTypes.add(type); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java new file mode 100644 index 0000000000000..56582e0746737 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Package containing usage information for features that are exposed in OSS. 
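visitMapping walks a mapping's "properties" recursively, visiting multi-fields under "fields" as well, and IndexUsageTransportAction uses it to collect field types and analyzer names. A self-contained usage sketch with a hand-built, made-up mapping:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;

final class MappingWalk {

    /** Same shape as the visitMapping helper above: recurse through "properties" and "fields". */
    static void visit(Map<String, Object> mapping, Consumer<Map<String, Object>> consumer) {
        Object properties = mapping.get("properties");
        if (properties instanceof Map) {
            for (Object field : ((Map<?, ?>) properties).values()) {
                if (field instanceof Map) {
                    @SuppressWarnings("unchecked")
                    Map<String, Object> fieldMapping = (Map<String, Object>) field;
                    consumer.accept(fieldMapping);
                    visit(fieldMapping, consumer);       // nested object fields
                    Object multiFields = fieldMapping.get("fields");
                    if (multiFields instanceof Map) {
                        for (Object sub : ((Map<?, ?>) multiFields).values()) {
                            if (sub instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, Object> subMapping = (Map<String, Object>) sub;
                                consumer.accept(subMapping); // multi-fields are visited but not recursed into
                            }
                        }
                    }
                }
            }
        }
    }

    public static void main(String[] args) {
        // Hypothetical mapping: an object field with a text sub-field that has a keyword multi-field.
        Map<String, Object> mapping = Map.of("properties", Map.of(
            "user", Map.of("properties", Map.of(
                "name", Map.of("type", "text", "fields", Map.of(
                    "raw", Map.of("type", "keyword")))))));

        Set<String> fieldTypes = new HashSet<>();
        visit(mapping, field -> {
            Object type = field.get("type");
            if (type != null) {
                fieldTypes.add(type.toString());
            } else if (field.containsKey("properties")) {
                fieldTypes.add("object");
            }
        });
        System.out.println(fieldTypes); // e.g. [object, text, keyword]
    }
}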
+ */ +package org.elasticsearch.xpack.oss; \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java index 5bc33ae330a18..aa482707e286c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java @@ -70,6 +70,7 @@ protected void setInitialState(License license, XPackLicenseState licenseState, when(discoveryNodes.getMasterNode()).thenReturn(mockNode); when(discoveryNodes.spliterator()).thenReturn(Arrays.asList(mockNode).spliterator()); when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(false); + when(discoveryNodes.getMinNodeVersion()).thenReturn(mockNode.getVersion()); when(state.nodes()).thenReturn(discoveryNodes); when(state.getNodes()).thenReturn(discoveryNodes); // it is really ridiculous we have nodes() and getNodes()... when(clusterService.state()).thenReturn(state); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java index 20df885261fed..2ef2438123650 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java @@ -35,15 +35,19 @@ public void init() throws Exception { public void testLicenseOperationModeUpdate() throws Exception { License.LicenseType type = randomFrom(License.LicenseType.values()); - License license = License.builder() - .uid("id") - .expiryDate(0) - .issueDate(0) - .issuedTo("elasticsearch") - .issuer("issuer") - .type(type) - .maxNodes(1) - .build(); + final License.Builder licenseBuilder = License.builder() + .uid("id") + .expiryDate(0) + .issueDate(0) + .issuedTo("elasticsearch") + .issuer("issuer") + .type(type); + if (type == License.LicenseType.ENTERPRISE) { + licenseBuilder.maxResourceUnits(1); + } else { + licenseBuilder.maxNodes(1); + } + License license = licenseBuilder.build(); assertThat(license.operationMode(), equalTo(License.OperationMode.resolve(type))); OperationModeFileWatcherTests.writeMode("gold", licenseModeFile); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java index b1b22f15c259f..0f465834ad553 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java @@ -190,7 +190,8 @@ private License buildLicense(License.LicenseType type, TimeValue expires) { .issuer(randomAlphaOfLengthBetween(5, 60)) .issuedTo(randomAlphaOfLengthBetween(5, 60)) .issueDate(System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(randomLongBetween(1, 5000))) - .maxNodes(randomIntBetween(1, 500)) + .maxNodes(type == License.LicenseType.ENTERPRISE ? -1 : randomIntBetween(1, 500)) + .maxResourceUnits(type == License.LicenseType.ENTERPRISE ? 
randomIntBetween(10, 500) : -1) .signature(null) .build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTests.java index aa209f9a520ac..05f54383eedd3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTests.java @@ -6,13 +6,18 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TestMatchers; +import org.hamcrest.Matchers; import java.nio.BufferUnderflowException; import java.nio.charset.StandardCharsets; @@ -27,7 +32,7 @@ public class LicenseTests extends ESTestCase { - public void testFromXContent() throws Exception { + public void testFromXContentForGoldLicenseWithVersion2Signature() throws Exception { String licenseString = "{\"license\":" + "{\"uid\":\"4056779d-b823-4c12-a9cb-efa4a8d8c422\"," + "\"type\":\"gold\"," + @@ -51,27 +56,107 @@ public void testFromXContent() throws Exception { assertThat(license.issuedTo(), equalTo("customer")); assertThat(license.expiryDate(), equalTo(1546596340459L)); assertThat(license.issueDate(), equalTo(1546589020459L)); + assertThat(license.maxNodes(), equalTo(5)); + assertThat(license.maxResourceUnits(), equalTo(-1)); + assertThat(license.version(), equalTo(2)); + } + + public void testFromXContentForGoldLicenseWithVersion4Signature() throws Exception { + String licenseString = "{\"license\":{" + + "\"uid\":\"4056779d-b823-4c12-a9cb-efa4a8d8c422\"," + + "\"type\":\"gold\"," + + "\"issue_date_in_millis\":1546589020459," + + "\"expiry_date_in_millis\":1546596340459," + + "\"max_nodes\":5," + + "\"issued_to\":\"customer\"," + + "\"issuer\":\"elasticsearch\"," + + "\"signature\":\"AAAABAAAAA22vXffI41oM4jLCwZ6AAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAH3oL4weubwYGjLGNZsz90" + + "EerX6yOX3Dh6wswG9EfqCiyv6lcjuC7aeKKuOkqhMRTHZ9vHnfMuakHWVlpuGC14WyGqaMwSmgTZ9jVAzt/W3sIotRxM/3rtlCXUc1rOUXNFcii1i3Kkrc" + + "kTzhENTKjdkOmUN3qZlTEmHkp93eYpx8++iIukHYU9K9Vm2VKgydFfxvYaN/Qr+iPfJSbHJB8+DmS2ywdrmdqW+ScE+1ZNouPNhnP3RKTleNvixXPG9l5B" + + "qZ2So1IlCrxVDByA1E6JH5AvjbOucpcGiWCm7IzvfpkzphKHMyxhUaIByoHl9UAf4AdPLhowWAQk0eHMRDDlo=\"," + + "\"start_date_in_millis\":-1}}\n"; + License license = License.fromSource(new BytesArray(licenseString.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON); + assertThat(license.type(), equalTo("gold")); + assertThat(license.uid(), equalTo("4056779d-b823-4c12-a9cb-efa4a8d8c422")); + assertThat(license.issuer(), equalTo("elasticsearch")); + assertThat(license.issuedTo(), equalTo("customer")); + assertThat(license.expiryDate(), equalTo(1546596340459L)); + assertThat(license.issueDate(), equalTo(1546589020459L)); + assertThat(license.maxNodes(), equalTo(5)); + assertThat(license.maxResourceUnits(), equalTo(-1)); + assertThat(license.version(), equalTo(4)); + } + + public void testFromXContentForEnterpriseLicenseWithV5Signature() throws Exception { + String 
licenseString = "{\"license\":{" + + "\"uid\":\"4056779d-b823-4c12-a9cb-efa4a8d8c422\"," + + "\"type\":\"enterprise\"," + + "\"issue_date_in_millis\":1546589020459," + + "\"expiry_date_in_millis\":1546596340459," + + "\"max_nodes\":null," + + "\"max_resource_units\":15," + + "\"issued_to\":\"customer\"," + + "\"issuer\":\"elasticsearch\"," + + "\"signature\":\"AAAABQAAAA2MUoEqXb9K9Ie5d6JJAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAAAwVZKGAmDELUlS5PScBkhQsZa" + + "DaQTtJ4ZP5EnZ/nLpmCt9Dj7d/FRsgMtHmSJLrr2CdrIo4Vx5VuhmbwzZvXMttLz2lrJzG7770PX3TnC9e7F9GdnE9ec0FP2U0ZlLOBOtPuirX0q+j6GfB+DLyE" + + "5D+Lo1NQ3eLJGvbd3DBYPWJxkb+EBVHczCH2OrIEVWnN/TafmkdZCPX5PcultkNOs3j7d3s7b51EXHKoye8UTcB/RGmzZwMah+E6I/VJkqu7UHL8bB01wJeqo6W" + + "xI4LC/9+f5kpmHrUu3CHe5pHbmMGDk7O6/cwt1pw/hnJXKIFCi36IGaKcHLgORxQdN0uzE=\"," + + "\"start_date_in_millis\":-1}}"; + License license = License.fromSource(new BytesArray(licenseString.getBytes(StandardCharsets.UTF_8)), + XContentType.JSON); + assertThat(license.type(), equalTo("enterprise")); + assertThat(license.uid(), equalTo("4056779d-b823-4c12-a9cb-efa4a8d8c422")); + assertThat(license.issuer(), equalTo("elasticsearch")); + assertThat(license.issuedTo(), equalTo("customer")); + assertThat(license.expiryDate(), equalTo(1546596340459L)); + assertThat(license.issueDate(), equalTo(1546589020459L)); + assertThat(license.maxNodes(), equalTo(-1)); + assertThat(license.maxResourceUnits(), equalTo(15)); + assertThat(license.version(), equalTo(5)); + } + + public void testThatEnterpriseLicenseMayNotHaveMaxNodes() throws Exception { + License.Builder builder = randomLicense(License.LicenseType.ENTERPRISE) + .maxNodes(randomIntBetween(1, 50)) + .maxResourceUnits(randomIntBetween(10, 500)); + final IllegalStateException ex = expectThrows(IllegalStateException.class, builder::build); + assertThat(ex, TestMatchers.throwableWithMessage("maxNodes may not be set for enterprise licenses (type=[enterprise])")); + } + + public void testThatEnterpriseLicenseMustHaveMaxResourceUnits() throws Exception { + License.Builder builder = randomLicense(License.LicenseType.ENTERPRISE) + .maxResourceUnits(-1); + final IllegalStateException ex = expectThrows(IllegalStateException.class, builder::build); + assertThat(ex, TestMatchers.throwableWithMessage("maxResourceUnits must be set for enterprise licenses (type=[enterprise])")); + } + + public void testThatRegularLicensesMustHaveMaxNodes() throws Exception { + License.LicenseType type = randomValueOtherThan(License.LicenseType.ENTERPRISE, () -> randomFrom(License.LicenseType.values())); + License.Builder builder = randomLicense(type) + .maxNodes(-1); + final IllegalStateException ex = expectThrows(IllegalStateException.class, builder::build); + assertThat(ex, TestMatchers.throwableWithMessage("maxNodes has to be set")); + } + + public void testThatRegularLicensesMayNotHaveMaxResourceUnits() throws Exception { + License.LicenseType type = randomValueOtherThan(License.LicenseType.ENTERPRISE, () -> randomFrom(License.LicenseType.values())); + License.Builder builder = randomLicense(type) + .maxResourceUnits(randomIntBetween(10, 500)) + .maxNodes(randomIntBetween(1, 50)); + final IllegalStateException ex = expectThrows(IllegalStateException.class, builder::build); + assertThat(ex, TestMatchers.throwableWithMessage("maxResourceUnits may only be set for enterprise licenses (not permitted " + + "for type=[" + type.getTypeName() + "])")); } public void testLicenseToAndFromXContentForEveryLicenseType() throws Exception { for (License.LicenseType type : 
License.LicenseType.values()) { - final License license1 = License.builder() - .uid(UUIDs.randomBase64UUID(random())) - .type(type) - .issueDate(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(randomIntBetween(1, 10))) - .expiryDate(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(randomIntBetween(1, 1000))) - .maxNodes(randomIntBetween(1, 100)) - .issuedTo(randomAlphaOfLengthBetween(5, 50)) - .issuer(randomAlphaOfLengthBetween(5, 50)) + final License license1 = randomLicense(type) // We need a signature that parses correctly, but it doesn't need to verify - .signature("AAAAAgAAAA34V2kfTJVtvdL2LttwAAABmFJ6NGRnbEM3WVQrZVQwNkdKQmR1VytlMTMyM1J0dTZ1WGwyY2ZCVFhqMGtJU2gzZ3pnNTVpOW" + - "F5Y1NaUkwyN2VsTEtCYnlZR2c5WWtjQ0phaDlhRjlDUXViUmUwMWhjSkE2TFcwSGdneTJHbUV4N2RHUWJxV20ybjRsZHRzV2xkN0ZmdDlYblJmNVc" + - "xMlBWeU81V1hLUm1EK0V1dmF3cFdlSGZzTU5SZE1qUmFra3JkS1hCanBWVmVTaFFwV3BVZERzeG9Sci9rYnlJK2toODZXY09tNmFHUVNUL3IyUHEx" + - "V3VSTlBneWNJcFQ0bXl0cmhNNnRwbE1CWE4zWjJ5eGFuWFo0NGhsb3B5WFd1eTdYbFFWQkxFVFFPSlBERlB0eVVJYXVSZ0lsR2JpRS9rN1h4MSsvN" + - "UpOcGN6cU1NOHN1cHNtSTFIUGN1bWNGNEcxekhrblhNOXZ2VEQvYmRzQUFwbytUZEpRR3l6QU5oS2ZFSFdSbGxxNDZyZ0xvUHIwRjdBL2JqcnJnNG" + - "FlK09Cek9pYlJ5Umc9PQAAAQAth77fQLF7CCEL7wA6Z0/UuRm/weECcsjW/50kBnPLO8yEs+9/bPa5LSU0bF6byEXOVeO0ebUQfztpjulbXh8TrBD" + - "SG+6VdxGtohPo2IYPBaXzGs3LOOor6An/lhptxBWdwYmfbcp0m8mnXZh1vN9rmbTsZXnhBIoPTaRDwUBi3vJ3Ms3iLaEm4S8Slrfmtht2jUjgGZ2v" + - "AeZ9OHU2YsGtrSpz6f") + .signature("AAAABQAAAA2MUoEqXb9K9Ie5d6JJAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAAAwVZKGAmDELUlS5PScBkhQsZa" + + "DaQTtJ4ZP5EnZ/nLpmCt9Dj7d/FRsgMtHmSJLrr2CdrIo4Vx5VuhmbwzZvXMttLz2lrJzG7770PX3TnC9e7F9GdnE9ec0FP2U0ZlLOBOtPuirX0q+j" + + "6GfB+DLyE5D+Lo1NQ3eLJGvbd3DBYPWJxkb+EBVHczCH2OrIEVWnN/TafmkdZCPX5PcultkNOs3j7d3s7b51EXHKoye8UTcB/RGmzZwMah+E6I/VJk" + + "qu7UHL8bB01wJeqo6WxI4LC/9+f5kpmHrUu3CHe5pHbmMGDk7O6/cwt1pw/hnJXKIFCi36IGaKcHLgORxQdN0uzE=") .build(); XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, THROW_UNSUPPORTED_OPERATION, Strings.toString(license1)); @@ -83,6 +168,46 @@ public void testLicenseToAndFromXContentForEveryLicenseType() throws Exception { assertThat(license2.issuedTo(), equalTo(license1.issuedTo())); assertThat(license2.expiryDate(), equalTo(license1.expiryDate())); assertThat(license2.issueDate(), equalTo(license1.issueDate())); + assertThat(license2.maxNodes(), equalTo(license1.maxNodes())); + assertThat(license2.maxResourceUnits(), equalTo(license1.maxResourceUnits())); + } + } + + public void testSerializationOfLicenseForEveryLicenseType() throws Exception { + for (License.LicenseType type : License.LicenseType.values()) { + final String signature = randomBoolean() ? 
null : "AAAABQAAAA2MUoEqXb9K9Ie5d6JJAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEM" + + "hm4jAAABAAAwVZKGAmDELUlS5PScBkhQsZaDaQTtJ4ZP5EnZ/nLpmCt9Dj7d/FRsgMtHmSJLrr2CdrIo4Vx5VuhmbwzZvXMttLz2lrJzG7770PX3TnC9e7" + + "F9GdnE9ec0FP2U0ZlLOBOtPuirX0q+j6GfB+DLyE5D+Lo1NQ3eLJGvbd3DBYPWJxkb+EBVHczCH2OrIEVWnN/TafmkdZCPX5PcultkNOs3j7d3s7b51EXH" + + "Koye8UTcB/RGmzZwMah+E6I/VJkqu7UHL8bB01wJeqo6WxI4LC/9+f5kpmHrUu3CHe5pHbmMGDk7O6/cwt1pw/hnJXKIFCi36IGaKcHLgORxQdN0uzE="; + final int version; + if (type == License.LicenseType.ENTERPRISE) { + version = randomIntBetween(License.VERSION_ENTERPRISE, License.VERSION_CURRENT); + } else { + version = randomIntBetween(License.VERSION_NO_FEATURE_TYPE, License.VERSION_CURRENT); + } + + final License license1 = randomLicense(type).signature(signature).version(version).build(); + + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(Version.CURRENT); + license1.writeTo(out); + + final StreamInput in = out.bytes().streamInput(); + in.setVersion(Version.CURRENT); + final License license2 = License.readLicense(in); + assertThat(in.read(), Matchers.equalTo(-1)); + + assertThat(license2, notNullValue()); + assertThat(license2.type(), equalTo(type.getTypeName())); + assertThat(license2.version(), equalTo(version)); + assertThat(license2.signature(), equalTo(signature)); + assertThat(license2.uid(), equalTo(license1.uid())); + assertThat(license2.issuer(), equalTo(license1.issuer())); + assertThat(license2.issuedTo(), equalTo(license1.issuedTo())); + assertThat(license2.expiryDate(), equalTo(license1.expiryDate())); + assertThat(license2.issueDate(), equalTo(license1.issueDate())); + assertThat(license2.maxNodes(), equalTo(license1.maxNodes())); + assertThat(license2.maxResourceUnits(), equalTo(license1.maxResourceUnits())); } } @@ -158,4 +283,17 @@ public void testUnableToBase64DecodeFromXContent() throws Exception { assertThat(exception.getMessage(), containsString("malformed signature for license [4056779d-b823-4c12-a9cb-efa4a8d8c422]")); assertThat(exception.getCause(), instanceOf(IllegalArgumentException.class)); } + + private License.Builder randomLicense(License.LicenseType type) { + return License.builder() + .uid(UUIDs.randomBase64UUID(random())) + .type(type) + .issueDate(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(randomIntBetween(1, 10))) + .expiryDate(System.currentTimeMillis() + TimeUnit.DAYS.toMillis(randomIntBetween(1, 1000))) + .maxNodes(type == License.LicenseType.ENTERPRISE ? -1 : randomIntBetween(1, 100)) + .maxResourceUnits(type == License.LicenseType.ENTERPRISE ? 
randomIntBetween(1, 100) : -1) + .issuedTo(randomAlphaOfLengthBetween(5, 50)) + .issuer(randomAlphaOfLengthBetween(5, 50)); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java index 26ed6f5e446db..1c96fef045a64 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -33,4 +34,8 @@ public void testIsLicenseExpiredException() { exception = new ElasticsearchSecurityException("msg"); assertFalse(LicenseUtils.isLicenseExpiredException(exception)); } + + public void testVersionsUpToDate() { + assertThat(LicenseUtils.compatibleLicenseVersion(DiscoveryNodes.EMPTY_NODES), equalTo(License.VERSION_CURRENT)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/FileMatchers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/FileMatchers.java new file mode 100644 index 0000000000000..a18e1bab94123 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/FileMatchers.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.test; + +import org.hamcrest.CustomMatcher; +import org.hamcrest.Description; +import org.hamcrest.Matcher; + +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; + +public class FileMatchers { + public static Matcher pathExists(LinkOption... options) { + return new CustomMatcher<>("Path exists") { + @Override + public boolean matches(Object item) { + if (item instanceof Path) { + Path path = (Path) item; + return Files.exists(path, options); + } else { + return false; + } + + } + }; + } + + public static Matcher isDirectory(LinkOption... options) { + return new FileTypeMatcher("directory", options) { + @Override + protected boolean matchPath(Path path) { + return Files.isDirectory(path, options); + } + }; + } + + public static Matcher isRegularFile(LinkOption... options) { + return new FileTypeMatcher("regular file", options) { + @Override + protected boolean matchPath(Path path) { + return Files.isRegularFile(path, options); + } + }; + } + + private abstract static class FileTypeMatcher extends CustomMatcher { + private final LinkOption[] options; + + FileTypeMatcher(String typeName, LinkOption... 
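// Editor's note: hypothetical usage of the new FileMatchers helpers introduced above. Unlike the
// deprecated TestMatchers.pathExists(path), the Path is passed as the actual value, so Hamcrest's
// mismatch description can report what was really found (missing, directory, symlink, regular
// file). The paths and file names below are placeholders.
import static org.elasticsearch.test.FileMatchers.isDirectory;
import static org.elasticsearch.test.FileMatchers.isRegularFile;
import static org.elasticsearch.test.FileMatchers.pathExists;
import static org.hamcrest.MatcherAssert.assertThat;

import java.nio.file.Path;

public class FileMatchersUsageExample {
    public void assertExpectedLayout(Path configDir) {
        assertThat(configDir, pathExists());
        assertThat(configDir, isDirectory());
        assertThat(configDir.resolve("example.yml"), isRegularFile());   // placeholder file name
    }
}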
options) { + super("Path is " + typeName); + this.options = options; + } + + @Override + public boolean matches(Object item) { + if (item instanceof Path) { + Path path = (Path) item; + return matchPath(path); + } else { + return false; + } + } + + protected abstract boolean matchPath(Path path); + + @Override + public void describeMismatch(Object item, Description description) { + super.describeMismatch(item, description); + if (item instanceof Path) { + Path path = (Path) item; + if (Files.exists(path, options) == false) { + description.appendText(" (file not found)"); + } else if (Files.isDirectory(path, options)) { + description.appendText(" (directory)"); + } else if (Files.isSymbolicLink(path)) { + description.appendText(" (symlink)"); + } else if (Files.isRegularFile(path, options)) { + description.appendText(" (regular file)"); + } else { + description.appendText(" (unknown file type)"); + } + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java index 452805c859838..c1a290f509a58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java @@ -20,6 +20,10 @@ public class TestMatchers extends Matchers { + /** + * @deprecated Use {@link FileMatchers#pathExists} + */ + @Deprecated public static Matcher pathExists(Path path, LinkOption... options) { return new CustomMatcher("Path " + path + " exists") { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java index f19b31d659ffa..4b6700e7ac151 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java @@ -114,7 +114,7 @@ public static Map randomMap(int minEntries, int maxEntries) { Map map = new HashMap<>(); int numIncludes = randomIntBetween(minEntries, maxEntries); for (int i = 0; i < numIncludes; i++) { - map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + map.put(randomAlphaOfLengthBetween(2, 20), randomAlphaOfLengthBetween(2, 20)); } return map; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java index f06b8c873e237..4d45e18b32671 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java @@ -196,8 +196,11 @@ public void testConditionNotMetDueToRelocation() { public void testExecuteAllocateNotComplete() throws Exception { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); Map includes = AllocateActionTests.randomMap(1, 5); - Map excludes = AllocateActionTests.randomMap(1, 5); - Map requires = AllocateActionTests.randomMap(1, 5); + Map excludes = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey), + () -> AllocateActionTests.randomMap(1, 5)); + Map requires = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey) || + map.keySet().stream().anyMatch(excludes::containsKey), + () 
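// Editor's sketch of what ESTestCase's randomValueOtherThanMany(reject, supplier) does, since the
// AllocationRoutedStepTests hunks above and below lean on it to keep the include/exclude/require
// maps from sharing keys: keep drawing fresh candidates until the rejection predicate accepts one.
// This is an illustrative re-implementation, not the framework code.
import java.util.function.Predicate;
import java.util.function.Supplier;

final class RetryUntilAccepted {
    static <T> T valueOtherThanMany(Predicate<T> reject, Supplier<T> generator) {
        T candidate;
        do {
            candidate = generator.get();
        } while (reject.test(candidate));   // retry while the candidate clashes with earlier picks
        return candidate;
    }
}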
-> AllocateActionTests.randomMap(1, 5)); Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); Settings.Builder expectedSettings = Settings.builder(); @@ -230,8 +233,11 @@ public void testExecuteAllocateNotComplete() throws Exception { public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exception { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); Map includes = AllocateActionTests.randomMap(1, 5); - Map excludes = AllocateActionTests.randomMap(1, 5); - Map requires = AllocateActionTests.randomMap(1, 5); + Map excludes = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey), + () -> AllocateActionTests.randomMap(1, 5)); + Map requires = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey) || + map.keySet().stream().anyMatch(excludes::containsKey), + () -> AllocateActionTests.randomMap(1, 5)); Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); Settings.Builder expectedSettings = Settings.builder(); @@ -266,8 +272,11 @@ public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exceptio public void testExecuteAllocateUnassigned() throws Exception { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); Map includes = AllocateActionTests.randomMap(1, 5); - Map excludes = AllocateActionTests.randomMap(1, 5); - Map requires = AllocateActionTests.randomMap(1, 5); + Map excludes = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey), + () -> AllocateActionTests.randomMap(1, 5)); + Map requires = randomValueOtherThanMany(map -> map.keySet().stream().anyMatch(includes::containsKey) || + map.keySet().stream().anyMatch(excludes::containsKey), + () -> AllocateActionTests.randomMap(1, 5)); Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); Settings.Builder expectedSettings = Settings.builder(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionRequestTests.java new file mode 100644 index 0000000000000..0c39469c9029e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionRequestTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Request; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfigTests; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; + +public class PutTrainedModelActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Request createTestInstance() { + String modelId = randomAlphaOfLength(10); + return new Request(TrainedModelConfigTests.createTestInstance(modelId) + .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) + .build()); + } + + @Override + protected Writeable.Reader instanceReader() { + return (in) -> { + Request request = new Request(in); + request.getTrainedModelConfig().ensureParsedDefinition(xContentRegistry()); + return request; + }; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new MlInferenceNamedXContentProvider().getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionResponseTests.java new file mode 100644 index 0000000000000..5813b13c8ad55 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionResponseTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Response; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfigTests; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; + +public class PutTrainedModelActionResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected Response createTestInstance() { + String modelId = randomAlphaOfLength(10); + return new Response(TrainedModelConfigTests.createTestInstance(modelId) + .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) + .build()); + } + + @Override + protected Writeable.Reader instanceReader() { + return (in) -> { + Response response = new Response(in); + response.getResponse().ensureParsedDefinition(xContentRegistry()); + return response; + }; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new MlInferenceNamedXContentProvider().getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 3d1677fcc78d3..4c8bd4168df20 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -274,7 +274,7 @@ public void testFutureConfigParse() throws IOException { .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage()); + assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today]", e.getMessage()); } public void testPastQueryConfigParse() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java index 428b63554d8cf..ad6f79fdd37ed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -328,7 +328,7 @@ public void testPreventCreateTimeInjection() throws IOException { XContentFactory.xContent(XContentType.JSON).createParser( xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) { Exception e = expectThrows(IllegalArgumentException.class, () -> DataFrameAnalyticsConfig.STRICT_PARSER.apply(parser, null)); - assertThat(e.getMessage(), containsString("unknown field [create_time], 
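// Editor's sketch of the AbstractWireSerializingTestCase pattern followed by the two new
// PutTrainedModelAction request/response test classes above: createTestInstance() supplies random
// instances and instanceReader() tells the framework how to deserialize them. "Label" is a made-up
// Writeable used only for illustration; it is not part of the ML code base.
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;

import java.io.IOException;
import java.util.Objects;

public class LabelSerializationTests extends AbstractWireSerializingTestCase<LabelSerializationTests.Label> {

    // A trivial Writeable: the framework round-trips it through the wire format and relies on
    // equals()/hashCode() to detect serialization bugs.
    public static class Label implements Writeable {
        private final String value;

        public Label(String value) {
            this.value = value;
        }

        public Label(StreamInput in) throws IOException {
            this.value = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(value);
        }

        @Override
        public boolean equals(Object other) {
            return other instanceof Label && Objects.equals(value, ((Label) other).value);
        }

        @Override
        public int hashCode() {
            return Objects.hash(value);
        }
    }

    @Override
    protected Label createTestInstance() {
        return new Label(randomAlphaOfLength(10));
    }

    @Override
    protected Writeable.Reader<Label> instanceReader() {
        return Label::new;
    }
}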
parser not found")); + assertThat(e.getMessage(), containsString("unknown field [create_time]")); } } @@ -343,7 +343,7 @@ public void testPreventVersionInjection() throws IOException { XContentFactory.xContent(XContentType.JSON).createParser( xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) { Exception e = expectThrows(IllegalArgumentException.class, () -> DataFrameAnalyticsConfig.STRICT_PARSER.apply(parser, null)); - assertThat(e.getMessage(), containsString("unknown field [version], parser not found")); + assertThat(e.getMessage(), containsString("unknown field [version]")); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParamsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParamsTests.java index 145533df407cd..6f3aff88846d9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParamsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/BoostedTreeParamsTests.java @@ -23,7 +23,7 @@ protected BoostedTreeParams doParseInstance(XContentParser parser) throws IOExce new ConstructingObjectParser<>( BoostedTreeParams.NAME, true, - a -> new BoostedTreeParams((Double) a[0], (Double) a[1], (Double) a[2], (Integer) a[3], (Double) a[4])); + a -> new BoostedTreeParams((Double) a[0], (Double) a[1], (Double) a[2], (Integer) a[3], (Double) a[4], (Integer) a[5])); BoostedTreeParams.declareFields(objParser); return objParser.apply(parser, null); } @@ -34,12 +34,14 @@ protected BoostedTreeParams createTestInstance() { } public static BoostedTreeParams createRandom() { - Double lambda = randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true); - Double gamma = randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true); - Double eta = randomBoolean() ? null : randomDoubleBetween(0.001, 1.0, true); - Integer maximumNumberTrees = randomBoolean() ? null : randomIntBetween(1, 2000); - Double featureBagFraction = randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false); - return new BoostedTreeParams(lambda, gamma, eta, maximumNumberTrees, featureBagFraction); + return BoostedTreeParams.builder() + .setLambda(randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true)) + .setGamma(randomBoolean() ? null : randomDoubleBetween(0.0, Double.MAX_VALUE, true)) + .setEta(randomBoolean() ? null : randomDoubleBetween(0.001, 1.0, true)) + .setMaximumNumberTrees(randomBoolean() ? null : randomIntBetween(1, 2000)) + .setFeatureBagFraction(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, false)) + .setNumTopFeatureImportanceValues(randomBoolean() ? 
null : randomIntBetween(0, Integer.MAX_VALUE)) + .build(); } @Override @@ -49,57 +51,64 @@ protected Writeable.Reader instanceReader() { public void testConstructor_GivenNegativeLambda() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(-0.00001, 0.0, 0.5, 500, 0.3)); + () -> BoostedTreeParams.builder().setLambda(-0.00001).build()); assertThat(e.getMessage(), equalTo("[lambda] must be a non-negative double")); } public void testConstructor_GivenNegativeGamma() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, -0.00001, 0.5, 500, 0.3)); + () -> BoostedTreeParams.builder().setGamma(-0.00001).build()); assertThat(e.getMessage(), equalTo("[gamma] must be a non-negative double")); } public void testConstructor_GivenEtaIsZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 0.0, 500, 0.3)); + () -> BoostedTreeParams.builder().setEta(0.0).build()); assertThat(e.getMessage(), equalTo("[eta] must be a double in [0.001, 1]")); } public void testConstructor_GivenEtaIsGreaterThanOne() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 1.00001, 500, 0.3)); + () -> BoostedTreeParams.builder().setEta(1.00001).build()); assertThat(e.getMessage(), equalTo("[eta] must be a double in [0.001, 1]")); } public void testConstructor_GivenMaximumNumberTreesIsZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 0.5, 0, 0.3)); + () -> BoostedTreeParams.builder().setMaximumNumberTrees(0).build()); assertThat(e.getMessage(), equalTo("[maximum_number_trees] must be an integer in [1, 2000]")); } public void testConstructor_GivenMaximumNumberTreesIsGreaterThan2k() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 0.5, 2001, 0.3)); + () -> BoostedTreeParams.builder().setMaximumNumberTrees(2001).build()); assertThat(e.getMessage(), equalTo("[maximum_number_trees] must be an integer in [1, 2000]")); } public void testConstructor_GivenFeatureBagFractionIsLessThanZero() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 0.5, 500, -0.00001)); + () -> BoostedTreeParams.builder().setFeatureBagFraction(-0.00001).build()); assertThat(e.getMessage(), equalTo("[feature_bag_fraction] must be a double in (0, 1]")); } public void testConstructor_GivenFeatureBagFractionIsGreaterThanOne() { ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> new BoostedTreeParams(0.0, 0.0, 0.5, 500, 1.00001)); + () -> BoostedTreeParams.builder().setFeatureBagFraction(1.00001).build()); assertThat(e.getMessage(), equalTo("[feature_bag_fraction] must be a double in (0, 1]")); } + + public void testConstructor_GivenTopFeatureImportanceValuesIsNegative() { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> BoostedTreeParams.builder().setNumTopFeatureImportanceValues(-1).build()); + + assertThat(e.getMessage(), equalTo("[num_top_feature_importance_values] must be a non-negative integer")); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java 
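// Editor's note: a hypothetical use of the BoostedTreeParams.builder() API that the hunks above
// switch to (the old positional constructor is gone). The values are arbitrary but chosen to
// satisfy the ranges asserted in the tests; only setters visible in this diff are used, and the
// snippet is assumed to live somewhere the builder is accessible (the tests sit in the same package).
import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParams;

final class BoostedTreeParamsExample {
    static BoostedTreeParams example() {
        return BoostedTreeParams.builder()
            .setLambda(1.0)                          // non-negative double
            .setGamma(0.5)                           // non-negative double
            .setEta(0.1)                             // double in [0.001, 1]
            .setMaximumNumberTrees(500)              // integer in [1, 2000]
            .setFeatureBagFraction(0.3)              // double in (0, 1]
            .setNumTopFeatureImportanceValues(2)     // non-negative integer, new with this change
            .build();
    }
}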
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java index 1b988379fc218..64b14157cf613 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/ClassificationTests.java @@ -34,7 +34,7 @@ public class ClassificationTests extends AbstractSerializingTestCase { - private static final BoostedTreeParams BOOSTED_TREE_PARAMS = new BoostedTreeParams(0.0, 0.0, 0.5, 500, 1.0); + private static final BoostedTreeParams BOOSTED_TREE_PARAMS = BoostedTreeParams.builder().build(); @Override protected Classification doParseInstance(XContentParser parser) throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java index c7f89cc0413b5..83df5b44ced25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/RegressionTests.java @@ -30,7 +30,7 @@ public class RegressionTests extends AbstractSerializingTestCase { - private static final BoostedTreeParams BOOSTED_TREE_PARAMS = new BoostedTreeParams(0.0, 0.0, 0.5, 500, 1.0); + private static final BoostedTreeParams BOOSTED_TREE_PARAMS = BoostedTreeParams.builder().build(); @Override protected Regression doParseInstance(XContentParser parser) throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java index 8b267e10ca429..67b67a45500f1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.inference; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -56,14 +56,16 @@ public static TrainedModelConfig.Builder createTestInstance(String modelId) { return TrainedModelConfig.builder() .setInput(TrainedModelInputTests.createRandomInput()) .setMetadata(randomBoolean() ? null : Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))) - .setCreateTime(Instant.ofEpochMilli(randomNonNegativeLong())) + .setCreateTime(Instant.ofEpochMilli(randomLongBetween(Instant.MIN.getEpochSecond(), Instant.MAX.getEpochSecond()))) .setVersion(Version.CURRENT) .setModelId(modelId) .setCreatedBy(randomAlphaOfLength(10)) .setDescription(randomBoolean() ? 
null : randomAlphaOfLength(100)) .setEstimatedHeapMemory(randomNonNegativeLong()) .setEstimatedOperations(randomNonNegativeLong()) - .setLicenseLevel(License.OperationMode.PLATINUM.description()) + .setLicenseLevel(randomFrom(License.OperationMode.PLATINUM.description(), + License.OperationMode.GOLD.description(), + License.OperationMode.BASIC.description())) .setTags(tags); } @@ -191,50 +193,52 @@ public void testParseWithBothDefinitionAndCompressedSupplied() throws IOExceptio } public void testValidateWithNullDefinition() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> TrainedModelConfig.builder().validate()); - assertThat(ex.getMessage(), equalTo("[definition] must not be null.")); + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, + () -> TrainedModelConfig.builder().validate()); + assertThat(ex.getMessage(), containsString("[definition] must not be null.")); } public void testValidateWithInvalidID() { String modelId = "InvalidID-"; - ElasticsearchException ex = expectThrows(ElasticsearchException.class, + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, () -> TrainedModelConfig.builder() .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) .setModelId(modelId).validate()); - assertThat(ex.getMessage(), equalTo(Messages.getMessage(Messages.INVALID_ID, "model_id", modelId))); + assertThat(ex.getMessage(), containsString(Messages.getMessage(Messages.INVALID_ID, "model_id", modelId))); } public void testValidateWithLongID() { String modelId = IntStream.range(0, 100).mapToObj(x -> "a").collect(Collectors.joining()); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, () -> TrainedModelConfig.builder() .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) .setModelId(modelId).validate()); - assertThat(ex.getMessage(), equalTo(Messages.getMessage(Messages.ID_TOO_LONG, "model_id", modelId, MlStrings.ID_LENGTH_LIMIT))); + assertThat(ex.getMessage(), + containsString(Messages.getMessage(Messages.ID_TOO_LONG, "model_id", modelId, MlStrings.ID_LENGTH_LIMIT))); } public void testValidateWithIllegallyUserProvidedFields() { String modelId = "simplemodel"; - ElasticsearchException ex = expectThrows(ElasticsearchException.class, + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, () -> TrainedModelConfig.builder() .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) .setCreateTime(Instant.now()) - .setModelId(modelId).validate()); - assertThat(ex.getMessage(), equalTo("illegal to set [create_time] at inference model creation")); + .setModelId(modelId).validate(true)); + assertThat(ex.getMessage(), containsString("illegal to set [create_time] at inference model creation")); - ex = expectThrows(ElasticsearchException.class, + ex = expectThrows(ActionRequestValidationException.class, () -> TrainedModelConfig.builder() .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) .setVersion(Version.CURRENT) - .setModelId(modelId).validate()); - assertThat(ex.getMessage(), equalTo("illegal to set [version] at inference model creation")); + .setModelId(modelId).validate(true)); + assertThat(ex.getMessage(), containsString("illegal to set [version] at inference model creation")); - ex = expectThrows(ElasticsearchException.class, + ex = 
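// Editor's sketch of why the assertions above moved from equalTo(...) to containsString(...) and
// from ElasticsearchException to ActionRequestValidationException: validation errors are now
// accumulated into a single exception whose message lists every failure. This is illustrative only
// and not the real TrainedModelConfig.validate() implementation.
import org.elasticsearch.action.ActionRequestValidationException;

import static org.elasticsearch.action.ValidateActions.addValidationError;

final class AccumulatingValidationSketch {
    static ActionRequestValidationException validateForCreation(Object definition, Object createTime) {
        ActionRequestValidationException validationException = null;
        if (definition == null) {
            validationException = addValidationError("[definition] must not be null.", validationException);
        }
        if (createTime != null) {
            validationException = addValidationError("illegal to set [create_time] at inference model creation",
                validationException);
        }
        return validationException;   // null means "no errors"; callers throw it only when non-null
    }
}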
expectThrows(ActionRequestValidationException.class, () -> TrainedModelConfig.builder() .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) .setCreatedBy("ml_user") - .setModelId(modelId).validate()); - assertThat(ex.getMessage(), equalTo("illegal to set [created_by] at inference model creation")); + .setModelId(modelId).validate(true)); + assertThat(ex.getMessage(), containsString("illegal to set [created_by] at inference model creation")); } public void testSerializationWithLazyDefinition() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java index 46373dae834c9..ba2926e5050b3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java @@ -8,10 +8,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; @@ -28,7 +26,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -77,16 +74,24 @@ public static Ensemble createRandom() { OutputAggregator outputAggregator = randomFrom(new WeightedMode(weights), new WeightedSum(weights), new LogisticRegression(weights)); + TargetType targetType = randomFrom(TargetType.values()); List categoryLabels = null; - if (randomBoolean()) { + if (randomBoolean() && targetType == TargetType.CLASSIFICATION) { categoryLabels = Arrays.asList(generateRandomStringArray(randomIntBetween(1, 10), randomIntBetween(1, 10), false, false)); } + double[] thresholds = randomBoolean() && targetType == TargetType.CLASSIFICATION ? + Stream.generate(ESTestCase::randomDouble) + .limit(categoryLabels == null ? 
randomIntBetween(1, 10) : categoryLabels.size()) + .mapToDouble(Double::valueOf) + .toArray() : + null; return new Ensemble(featureNames, models, outputAggregator, - randomFrom(TargetType.values()), - categoryLabels); + targetType, + categoryLabels, + thresholds); } @Override @@ -101,17 +106,12 @@ protected Writeable.Reader instanceReader() { @Override protected NamedXContentRegistry xContentRegistry() { - List namedXContent = new ArrayList<>(); - namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); - namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); - return new NamedXContentRegistry(namedXContent); + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(); - entries.addAll(new MlInferenceNamedXContentProvider().getNamedWriteables()); - return new NamedWriteableRegistry(entries); + return new NamedWriteableRegistry(new MlInferenceNamedXContentProvider().getNamedWriteables()); } public void testEnsembleWithAggregatedOutputDifferingFromTrainedModels() { @@ -184,16 +184,15 @@ public void testEnsembleWithAggregatorOutputNotSupportingTargetType() { public void testEnsembleWithTargetTypeAndLabelsMismatch() { List featureNames = Arrays.asList("foo", "bar"); - String msg = "[target_type] should be [classification] if [classification_labels] is provided, and vice versa"; + String msg = "[target_type] should be [classification] if " + + "[classification_labels] or [classification_weights] are provided"; ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> { Ensemble.builder() .setFeatureNames(featureNames) .setTrainedModels(Arrays.asList( Tree.builder() .setNodes(TreeNode.builder(0) - .setLeftChild(1) - .setSplitFeature(1) - .setThreshold(randomDouble())) + .setLeafValue(randomDouble())) .setFeatureNames(featureNames) .build())) .setClassificationLabels(Arrays.asList("label1", "label2")) @@ -201,23 +200,6 @@ public void testEnsembleWithTargetTypeAndLabelsMismatch() { .validate(); }); assertThat(ex.getMessage(), equalTo(msg)); - ex = expectThrows(ElasticsearchException.class, () -> { - Ensemble.builder() - .setFeatureNames(featureNames) - .setTrainedModels(Arrays.asList( - Tree.builder() - .setNodes(TreeNode.builder(0) - .setLeftChild(1) - .setSplitFeature(1) - .setThreshold(randomDouble())) - .setFeatureNames(featureNames) - .build())) - .setTargetType(TargetType.CLASSIFICATION) - .setOutputAggregator(new WeightedMode()) - .build() - .validate(); - }); - assertThat(ex.getMessage(), equalTo(msg)); } public void testClassificationProbability() { @@ -262,34 +244,41 @@ public void testClassificationProbability() { .setFeatureNames(featureNames) .setTrainedModels(Arrays.asList(tree1, tree2, tree3)) .setOutputAggregator(new WeightedMode(new double[]{0.7, 0.5, 1.0})) + .setClassificationWeights(Arrays.asList(0.7, 0.3)) .build(); List featureVector = Arrays.asList(0.4, 0.0); Map featureMap = zipObjMap(featureNames, featureVector); List expected = Arrays.asList(0.768524783, 0.231475216); + List scores = Arrays.asList(0.230557435, 0.162032651); double eps = 0.000001; List probabilities = ((ClassificationInferenceResults)ensemble.infer(featureMap, new ClassificationConfig(2))).getTopClasses(); for(int i = 0; i < expected.size(); i++) { assertThat(probabilities.get(i).getProbability(), closeTo(expected.get(i), eps)); + 
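// Editor's note: a hypothetical, minimal classification Ensemble assembled with the builder calls
// exercised above (a single leaf-only tree, two labels, explicit classification weights). Import
// locations are inferred from the test file paths in this diff, the numbers are arbitrary, and this
// illustrates the API shape rather than a model that makes meaningful predictions.
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TargetType;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ensemble.Ensemble;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ensemble.WeightedMode;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.tree.Tree;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.tree.TreeNode;

final class TinyEnsembleExample {
    static Ensemble buildTinyClassifier() {
        List<String> featureNames = Arrays.asList("foo", "bar");
        Tree leafOnlyTree = Tree.builder()
            .setNodes(TreeNode.builder(0).setLeafValue(0.3))               // a single leaf node, no split
            .setFeatureNames(featureNames)
            .build();
        return Ensemble.builder()
            .setFeatureNames(featureNames)
            .setTrainedModels(Arrays.asList(leafOnlyTree))
            .setOutputAggregator(new WeightedMode(new double[] { 1.0 }))   // one weight per model
            .setTargetType(TargetType.CLASSIFICATION)                      // required when labels/weights are set
            .setClassificationLabels(Arrays.asList("label1", "label2"))
            .setClassificationWeights(Arrays.asList(0.7, 0.3))
            .build();
    }
}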
assertThat(probabilities.get(i).getScore(), closeTo(scores.get(i), eps)); } featureVector = Arrays.asList(2.0, 0.7); featureMap = zipObjMap(featureNames, featureVector); - expected = Arrays.asList(0.689974481, 0.3100255188); + expected = Arrays.asList(0.310025518, 0.6899744811); + scores = Arrays.asList(0.217017863, 0.2069923443); probabilities = ((ClassificationInferenceResults)ensemble.infer(featureMap, new ClassificationConfig(2))).getTopClasses(); for(int i = 0; i < expected.size(); i++) { assertThat(probabilities.get(i).getProbability(), closeTo(expected.get(i), eps)); + assertThat(probabilities.get(i).getScore(), closeTo(scores.get(i), eps)); } featureVector = Arrays.asList(0.0, 1.0); featureMap = zipObjMap(featureNames, featureVector); expected = Arrays.asList(0.768524783, 0.231475216); + scores = Arrays.asList(0.230557435, 0.162032651); probabilities = ((ClassificationInferenceResults)ensemble.infer(featureMap, new ClassificationConfig(2))).getTopClasses(); for(int i = 0; i < expected.size(); i++) { assertThat(probabilities.get(i).getProbability(), closeTo(expected.get(i), eps)); + assertThat(probabilities.get(i).getScore(), closeTo(scores.get(i), eps)); } // This should handle missing values and take the default_left path @@ -298,10 +287,12 @@ public void testClassificationProbability() { put("bar", null); }}; expected = Arrays.asList(0.6899744811, 0.3100255188); + scores = Arrays.asList(0.482982136, 0.0930076556); probabilities = ((ClassificationInferenceResults)ensemble.infer(featureMap, new ClassificationConfig(2))).getTopClasses(); for(int i = 0; i < expected.size(); i++) { assertThat(probabilities.get(i).getProbability(), closeTo(expected.get(i), eps)); + assertThat(probabilities.get(i).getScore(), closeTo(scores.get(i), eps)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java index 8c05c8d7b9d3a..123a298b1d3a9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java @@ -93,14 +93,13 @@ public static Tree buildRandomTree(List featureNames, int depth) { } childNodes = nextNodes; } + TargetType targetType = randomFrom(TargetType.values()); List categoryLabels = null; - if (randomBoolean()) { + if (randomBoolean() && targetType == TargetType.CLASSIFICATION) { categoryLabels = Arrays.asList(generateRandomStringArray(randomIntBetween(1, 10), randomIntBetween(1, 10), false, false)); } - return builder.setTargetType(randomFrom(TargetType.values())) - .setClassificationLabels(categoryLabels) - .build(); + return builder.setTargetType(targetType).setClassificationLabels(categoryLabels).build(); } @Override @@ -325,7 +324,7 @@ public void testTreeWithCycle() { public void testTreeWithTargetTypeAndLabelsMismatch() { List featureNames = Arrays.asList("foo", "bar"); - String msg = "[target_type] should be [classification] if [classification_labels] is provided, and vice versa"; + String msg = "[target_type] should be [classification] if [classification_labels] are provided"; ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> { Tree.builder() .setRoot(TreeNode.builder(0) @@ -338,18 +337,6 @@ public void testTreeWithTargetTypeAndLabelsMismatch() { .validate(); }); assertThat(ex.getMessage(), equalTo(msg)); - 
ex = expectThrows(ElasticsearchException.class, () -> { - Tree.builder() - .setRoot(TreeNode.builder(0) - .setLeftChild(1) - .setSplitFeature(1) - .setThreshold(randomDouble())) - .setFeatureNames(featureNames) - .setTargetType(TargetType.CLASSIFICATION) - .build() - .validate(); - }); - assertThat(ex.getMessage(), equalTo(msg)); } public void testOperationsEstimations() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index f8fea739a2500..0ca0e1ccd814b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -81,7 +81,7 @@ public void testFutureConfigParse() throws IOException { .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB); XContentParseException e = expectThrows(XContentParseException.class, () -> Job.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage()); + assertEquals("[4:5] [job_details] unknown field [tomorrows_technology_today]", e.getMessage()); } public void testFutureMetadataParse() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index a8bf5272e94fb..be83ab37c7628 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.core.security.authz.accesscontrol; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -21,6 +24,8 @@ import org.apache.lucene.util.BitSet; import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; @@ -31,23 +36,53 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.MockLogAppender; import org.hamcrest.Matchers; - +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; +import 
static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class DocumentSubsetBitsetCacheTests extends ESTestCase { + private static final int FIELD_COUNT = 10; + private ExecutorService singleThreadExecutor; + + @Before + public void setUpExecutor() throws Exception { + singleThreadExecutor = Executors.newSingleThreadExecutor(); + } + + @After + public void cleanUpExecutor() throws Exception { + singleThreadExecutor.shutdown(); + } + public void testSameBitSetIsReturnedForIdenticalQuery() throws Exception { - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache cache = newCache(Settings.EMPTY); runTestOnIndex((shardContext, leafContext) -> { final Query query1 = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext); final BitSet bitSet1 = cache.getBitSet(query1, leafContext); @@ -62,7 +97,7 @@ public void testSameBitSetIsReturnedForIdenticalQuery() throws Exception { } public void testNullBitSetIsReturnedForNonMatchingQuery() throws Exception { - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache cache = newCache(Settings.EMPTY); runTestOnIndex((shardContext, leafContext) -> { final Query query = QueryBuilders.termQuery("does-not-exist", "any-value").toQuery(shardContext); final BitSet bitSet = cache.getBitSet(query, leafContext); @@ -71,7 +106,7 @@ public void testNullBitSetIsReturnedForNonMatchingQuery() throws Exception { } public void testNullEntriesAreNotCountedInMemoryUsage() throws Exception { - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache cache = newCache(Settings.EMPTY); assertThat(cache.ramBytesUsed(), equalTo(0L)); runTestOnIndex((shardContext, leafContext) -> { @@ -95,7 +130,7 @@ public void testCacheRespectsMemoryLimit() throws Exception { final Settings settings = Settings.builder() .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b") .build(); - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings); + final DocumentSubsetBitsetCache cache = newCache(settings); assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); @@ -138,11 +173,98 @@ public void testCacheRespectsMemoryLimit() throws Exception { }); } + public void testLogWarningIfBitSetExceedsCacheSize() throws Exception { + // This value is based on the internal implementation details of lucene's FixedBitSet + // If the implementation changes, this can be safely updated to match the new ram usage for a single bitset + final long expectedBytesPerBitSet = 56; + + // Enough to hold less than 1 bit-sets in the cache + final long maxCacheBytes = expectedBytesPerBitSet - expectedBytesPerBitSet/3; + final Settings settings = Settings.builder() + .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b") + .build(); + final DocumentSubsetBitsetCache cache = newCache(settings); + assertThat(cache.entryCount(), equalTo(0)); + assertThat(cache.ramBytesUsed(), equalTo(0L)); + + final Logger cacheLogger = LogManager.getLogger(cache.getClass()); + final MockLogAppender mockAppender = new MockLogAppender(); + 
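// Editor's note: the tests above call a newCache(Settings) helper whose body lies outside this
// hunk. Given the two-argument DocumentSubsetBitsetCache constructor used later in this file, it
// presumably wraps the single-thread cleanup executor created in setUpExecutor(); a sketch of what
// such a helper could look like inside the test class is:
private DocumentSubsetBitsetCache newCache(Settings settings) {
    return new DocumentSubsetBitsetCache(settings, singleThreadExecutor);
}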
mockAppender.start(); + try { + Loggers.addAppender(cacheLogger, mockAppender); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "[bitset too big]", + cache.getClass().getName(), + Level.WARN, + "built a DLS BitSet that uses [" + expectedBytesPerBitSet + "] bytes; the DLS BitSet cache has a maximum size of [" + + maxCacheBytes + "] bytes; this object cannot be cached and will need to be rebuilt for each use;" + + " consider increasing the value of [xpack.security.dls.bitset.cache.size]" + )); + + runTestOnIndex((shardContext, leafContext) -> { + final TermQueryBuilder queryBuilder = QueryBuilders.termQuery("field-1", "value-1"); + final Query query = queryBuilder.toQuery(shardContext); + final BitSet bitSet = cache.getBitSet(query, leafContext); + assertThat(bitSet, notNullValue()); + assertThat(bitSet.ramBytesUsed(), equalTo(expectedBytesPerBitSet)); + }); + + mockAppender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(cacheLogger, mockAppender); + mockAppender.stop(); + } + } + + public void testLogMessageIfCacheFull() throws Exception { + // This value is based on the internal implementation details of lucene's FixedBitSet + // If the implementation changes, this can be safely updated to match the new ram usage for a single bitset + final long expectedBytesPerBitSet = 56; + + // Enough to hold slightly more than 1 bit-sets in the cache + final long maxCacheBytes = expectedBytesPerBitSet + expectedBytesPerBitSet/3; + final Settings settings = Settings.builder() + .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b") + .build(); + final DocumentSubsetBitsetCache cache = newCache(settings); + assertThat(cache.entryCount(), equalTo(0)); + assertThat(cache.ramBytesUsed(), equalTo(0L)); + + final Logger cacheLogger = LogManager.getLogger(cache.getClass()); + final MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + try { + Loggers.addAppender(cacheLogger, mockAppender); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation( + "[cache full]", + cache.getClass().getName(), + Level.INFO, + "the Document Level Security BitSet cache is full which may impact performance;" + + " consider increasing the value of [xpack.security.dls.bitset.cache.size]" + )); + + runTestOnIndex((shardContext, leafContext) -> { + for (int i = 1; i <= 3; i++) { + final TermQueryBuilder queryBuilder = QueryBuilders.termQuery("field-" + i, "value-" + i); + final Query query = queryBuilder.toQuery(shardContext); + final BitSet bitSet = cache.getBitSet(query, leafContext); + assertThat(bitSet, notNullValue()); + assertThat(bitSet.ramBytesUsed(), equalTo(expectedBytesPerBitSet)); + } + }); + + mockAppender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(cacheLogger, mockAppender); + mockAppender.stop(); + } + } + public void testCacheRespectsAccessTimeExpiry() throws Exception { final Settings settings = Settings.builder() .put(DocumentSubsetBitsetCache.CACHE_TTL_SETTING.getKey(), "10ms") .build(); - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings); + final DocumentSubsetBitsetCache cache = newCache(settings); assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); @@ -167,8 +289,131 @@ public void testCacheRespectsAccessTimeExpiry() throws Exception { }); } + public void testIndexLookupIsClearedWhenBitSetIsEvicted() throws Exception { + // This value is based on the internal implementation details of lucene's 
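// Editor's sketch of the MockLogAppender pattern used by the two logging tests above: attach a mock
// appender, register the expected event, run the code under test, then verify and detach. The
// logger name and message below are placeholders, not the DLS cache's real output.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.MockLogAppender;

final class LogExpectationSketch {
    static void expectWarning(Runnable codeUnderTest) throws Exception {
        Logger logger = LogManager.getLogger("org.example.ComponentUnderTest");   // placeholder logger
        MockLogAppender appender = new MockLogAppender();
        appender.start();
        try {
            Loggers.addAppender(logger, appender);
            appender.addExpectation(new MockLogAppender.SeenEventExpectation(
                "[expected warning]",                    // expectation name shown on failure
                "org.example.ComponentUnderTest",        // logger the event must be emitted on
                Level.WARN,
                "placeholder warning message"));         // message the code under test should log
            codeUnderTest.run();
            appender.assertAllExpectationsMatched();
        } finally {
            Loggers.removeAppender(logger, appender);
            appender.stop();
        }
    }
}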
FixedBitSet + // If the implementation changes, this can be safely updated to match the new ram usage for a single bitset + final long expectedBytesPerBitSet = 56; + + // Enough to hold slightly more than 1 bit-set in the cache + final long maxCacheBytes = expectedBytesPerBitSet + expectedBytesPerBitSet/2; + final Settings settings = Settings.builder() + .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b") + .build(); + + final ExecutorService executor = mock(ExecutorService.class); + final AtomicReference runnableRef = new AtomicReference<>(); + when(executor.submit(any(Runnable.class))).thenAnswer(inv -> { + final Runnable r = (Runnable) inv.getArguments()[0]; + runnableRef.set(r); + return null; + }); + + final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings, executor); + assertThat(cache.entryCount(), equalTo(0)); + assertThat(cache.ramBytesUsed(), equalTo(0L)); + + runTestOnIndex((shardContext, leafContext) -> { + final Query query1 = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext); + final BitSet bitSet1 = cache.getBitSet(query1, leafContext); + assertThat(bitSet1, notNullValue()); + + final Query query2 = QueryBuilders.termQuery("field-2", "value-2").toQuery(shardContext); + final BitSet bitSet2 = cache.getBitSet(query2, leafContext); + assertThat(bitSet2, notNullValue()); + + // BitSet1 has been evicted now, run the cleanup... + final Runnable runnable1 = runnableRef.get(); + assertThat(runnable1, notNullValue()); + runnable1.run(); + cache.verifyInternalConsistency(); + + // Check that the original bitset is no longer in the cache (a new instance is returned) + assertThat(cache.getBitSet(query1, leafContext), not(sameInstance(bitSet1))); + + // BitSet2 has been evicted now, run the cleanup... + final Runnable runnable2 = runnableRef.get(); + assertThat(runnable2, not(sameInstance(runnable1))); + runnable2.run(); + cache.verifyInternalConsistency(); + }); + } + public void testCacheUnderConcurrentAccess() throws Exception { + // This value is based on the internal implementation details of lucene's FixedBitSet + // If the implementation changes, this can be safely updated to match the new ram usage for a single bitset + final long expectedBytesPerBitSet = 56; + + final int concurrentThreads = randomIntBetween(5, 15); + final int numberOfIndices = randomIntBetween(3, 8); + + // Force cache evictions by setting the size to be less than the number of distinct queries we search on. + final int maxCacheCount = randomIntBetween(FIELD_COUNT / 2, FIELD_COUNT * 3 / 4); + final long maxCacheBytes = expectedBytesPerBitSet * maxCacheCount; + final Settings settings = Settings.builder() + .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b") + .build(); + + final ExecutorService threads = Executors.newFixedThreadPool(concurrentThreads + 1); + final ExecutorService cleanupExecutor = Mockito.mock(ExecutorService.class); + when(cleanupExecutor.submit(any(Runnable.class))).thenAnswer(inv -> { + final Runnable runnable = (Runnable) inv.getArguments()[0]; + return threads.submit(() -> { + // Sleep for a small (random) length of time. 
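// Editor's sketch of the Mockito technique used above to make the cache's asynchronous cleanup
// deterministic: the mocked ExecutorService records each submitted Runnable instead of running it,
// so the test chooses exactly when the eviction cleanup executes. Names here are illustrative.
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;

final class CapturingExecutorSketch {
    static AtomicReference<Runnable> captureSubmissions(ExecutorService mockExecutor) {
        AtomicReference<Runnable> lastSubmitted = new AtomicReference<>();
        when(mockExecutor.submit(any(Runnable.class))).thenAnswer(invocation -> {
            lastSubmitted.set((Runnable) invocation.getArguments()[0]);
            return null;   // the code under test ignores the returned Future in this scenario
        });
        return lastSubmitted;
    }

    static void example() {
        ExecutorService executor = mock(ExecutorService.class);
        AtomicReference<Runnable> pendingCleanup = captureSubmissions(executor);
        // ... hand `executor` to the object under test and trigger an eviction ...
        Runnable cleanup = pendingCleanup.get();
        if (cleanup != null) {
            cleanup.run();   // run the deferred cleanup at a point the test controls
        }
    }
}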
+ // This increases the likelihood that cache could have been modified between the eviction & the cleanup + Thread.sleep(randomIntBetween(1, 10)); + runnable.run(); + return null; + }); + }); + try { + final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings, cleanupExecutor); + assertThat(cache.entryCount(), equalTo(0)); + assertThat(cache.ramBytesUsed(), equalTo(0L)); + + runTestOnIndices(numberOfIndices, contexts -> { + final CountDownLatch start = new CountDownLatch(concurrentThreads); + final CountDownLatch end = new CountDownLatch(concurrentThreads); + final Set uniqueBitSets = Collections.synchronizedSet(Collections.newSetFromMap(new IdentityHashMap<>())); + for (int thread = 0; thread < concurrentThreads; thread++) { + threads.submit(() -> { + start.countDown(); + start.await(100, TimeUnit.MILLISECONDS); + for (int loop = 0; loop < 15; loop++) { + for (int field = 1; field <= FIELD_COUNT; field++) { + final TermQueryBuilder queryBuilder = QueryBuilders.termQuery("field-" + field, "value-" + field); + final TestIndexContext randomContext = randomFrom(contexts); + final Query query = queryBuilder.toQuery(randomContext.queryShardContext); + final BitSet bitSet = cache.getBitSet(query, randomContext.leafReaderContext); + assertThat(bitSet, notNullValue()); + assertThat(bitSet.ramBytesUsed(), equalTo(expectedBytesPerBitSet)); + uniqueBitSets.add(bitSet); + } + } + end.countDown(); + return null; + }); + } + + assertTrue("Query threads did not complete in expected time", end.await(1, TimeUnit.SECONDS)); + + threads.shutdown(); + assertTrue("Cleanup thread did not complete in expected time", threads.awaitTermination(3, TimeUnit.SECONDS)); + cache.verifyInternalConsistency(); + + // Due to cache evictions, we must get more bitsets than fields + assertThat(uniqueBitSets.size(), Matchers.greaterThan(FIELD_COUNT)); + // Due to cache evictions, we must have seen more bitsets than the cache currently holds + assertThat(uniqueBitSets.size(), Matchers.greaterThan(cache.entryCount())); + // Even under concurrent pressure, the cache should hit the expected size + assertThat(cache.entryCount(), is(maxCacheCount)); + assertThat(cache.ramBytesUsed(), is(maxCacheBytes)); + }); + } finally { + threads.shutdown(); + } + } + public void testCacheIsPerIndex() throws Exception { - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache cache = newCache(Settings.EMPTY); assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); @@ -195,7 +440,7 @@ public void accept(QueryShardContext shardContext, LeafReaderContext leafContext } public void testCacheClearEntriesWhenIndexIsClosed() throws Exception { - final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache cache = newCache(Settings.EMPTY); assertThat(cache.entryCount(), equalTo(0)); assertThat(cache.ramBytesUsed(), equalTo(0L)); @@ -215,35 +460,106 @@ public void testCacheClearEntriesWhenIndexIsClosed() throws Exception { } private void runTestOnIndex(CheckedBiConsumer body) throws Exception { + runTestOnIndices(1, ctx -> { + final TestIndexContext indexContext = ctx.get(0); + body.accept(indexContext.queryShardContext, indexContext.leafReaderContext); + }); + } + + private static final class TestIndexContext implements Closeable { + private final Directory directory; + private final IndexWriter indexWriter; + private final DirectoryReader directoryReader; + private final 
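// Editor's note: a self-contained sketch of the two-latch pattern the concurrency test above uses.
// A "start" latch lines all workers up so they hit the shared object at roughly the same time, and
// an "end" latch lets the test wait for every worker before asserting. Thread counts and timeouts
// below are arbitrary examples.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class TwoLatchDemo {
    public static void main(String[] args) throws Exception {
        final int workers = 4;
        final CountDownLatch start = new CountDownLatch(workers);
        final CountDownLatch end = new CountDownLatch(workers);
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            pool.submit(() -> {
                start.countDown();                        // announce this worker is ready
                start.await(100, TimeUnit.MILLISECONDS);  // wait briefly for the others
                // ... exercise the shared object under test here ...
                end.countDown();                          // announce this worker is done
                return null;
            });
        }
        if (end.await(1, TimeUnit.SECONDS) == false) {
            throw new AssertionError("workers did not complete in time");
        }
        pool.shutdown();
    }
}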
QueryShardContext queryShardContext; + private final LeafReaderContext leafReaderContext; + + private TestIndexContext(Directory directory, IndexWriter indexWriter, DirectoryReader directoryReader, + QueryShardContext queryShardContext, LeafReaderContext leafReaderContext) { + this.directory = directory; + this.indexWriter = indexWriter; + this.directoryReader = directoryReader; + this.queryShardContext = queryShardContext; + this.leafReaderContext = leafReaderContext; + } + + @Override + public void close() throws IOException { + directoryReader.close(); + indexWriter.close(); + directory.close(); + } + } + + private TestIndexContext testIndex(MapperService mapperService, Client client) throws IOException { + TestIndexContext context = null; + + final long nowInMillis = randomNonNegativeLong(); final ShardId shardId = new ShardId("idx_" + randomAlphaOfLengthBetween(2, 8), randomAlphaOfLength(12), 0); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY); - final MapperService mapperService = mock(MapperService.class); - final long nowInMillis = randomNonNegativeLong(); + final IndexWriterConfig writerConfig = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE); - final Client client = mock(Client.class); - when(client.settings()).thenReturn(Settings.EMPTY); + Directory directory = null; + IndexWriter iw = null; + DirectoryReader directoryReader = null; + try { + directory = newDirectory(); - final IndexWriterConfig writerConfig = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE); - try (Directory directory = newDirectory(); - IndexWriter iw = new IndexWriter(directory, writerConfig)) { + iw = new IndexWriter(directory, writerConfig); for (int i = 1; i <= 100; i++) { Document document = new Document(); - for (int j = 1; j <= 10; j++) { + for (int j = 1; j <= FIELD_COUNT; j++) { document.add(new StringField("field-" + j, "value-" + i, Field.Store.NO)); } iw.addDocument(document); } iw.commit(); - try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { - final LeafReaderContext leaf = directoryReader.leaves().get(0); + directoryReader = DirectoryReader.open(directory); + final LeafReaderContext leaf = directoryReader.leaves().get(0); + + final QueryShardContext shardContext = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), + client, new IndexSearcher(directoryReader), () -> nowInMillis, null, null); - final QueryShardContext context = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, - null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), - client, new IndexSearcher(directoryReader), () -> nowInMillis, null, null); - body.accept(context, leaf); + context = new TestIndexContext(directory, iw, directoryReader, shardContext, leaf); + return context; + } finally { + if (context == null) { + if (directoryReader != null) { + directoryReader.close(); + } + if (iw != null) { + iw.close(); + } + if (directory != null) { + directory.close(); + } + } + } + } + + private void runTestOnIndices(int numberIndices, CheckedConsumer, Exception> body) throws Exception { + final MapperService mapperService = mock(MapperService.class); + + final Client client = mock(Client.class); + when(client.settings()).thenReturn(Settings.EMPTY); + + final List context = new ArrayList<>(numberIndices); + try { + 
for (int i = 0; i < numberIndices; i++) { + context.add(testIndex(mapperService, client)); + } + + body.accept(context); + } finally { + for (TestIndexContext indexContext : context) { + indexContext.close(); } } } + private DocumentSubsetBitsetCache newCache(Settings settings) { + return new DocumentSubsetBitsetCache(settings, singleThreadExecutor); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java index c84c0027302e6..3fae0c26ea76d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -32,6 +32,7 @@ import org.junit.Before; import java.io.IOException; +import java.util.concurrent.Executors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -50,7 +51,7 @@ public void setUpDirectory() { assertTrue(DocumentSubsetReader.NUM_DOCS_CACHE.toString(), DocumentSubsetReader.NUM_DOCS_CACHE.isEmpty()); directory = newDirectory(); - bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor()); } @After diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index ca2b38318a06f..1f8ba70426366 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -48,6 +48,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.Executors; import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; @@ -80,7 +81,7 @@ public void testDLS() throws Exception { null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null, null); QueryShardContext queryShardContext = spy(realQueryShardContext); - DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor()); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); @@ -202,7 +203,7 @@ public void testDLSWithLimitedPermissions() throws Exception { null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null, null); QueryShardContext queryShardContext = spy(realQueryShardContext); - DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor()); XPackLicenseState licenseState = mock(XPackLicenseState.class); 
when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index b2d32e871e15b..11cc52a970f72 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -50,6 +50,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.action.XPackInfoAction; +import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.StartILMAction; +import org.elasticsearch.xpack.core.ilm.action.StopILMAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; @@ -164,6 +169,7 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; /** @@ -179,6 +185,7 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved("foobar"), is(false)); assertThat(ReservedRolesStore.isReserved(SystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved("transport_client"), is(true)); + assertThat(ReservedRolesStore.isReserved("kibana_admin"), is(true)); assertThat(ReservedRolesStore.isReserved("kibana_user"), is(true)); assertThat(ReservedRolesStore.isReserved("ingest_admin"), is(true)); assertThat(ReservedRolesStore.isReserved("monitoring_user"), is(true)); @@ -249,8 +256,11 @@ public void testSnapshotUserRole() { // but that depends on how users are supposed to perform snapshots of those new indices. 
assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(true)); } + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test( + RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertNoAccessAllowed(snapshotUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(snapshotUserRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testIngestAdminRole() { @@ -280,6 +290,7 @@ public void testIngestAdminRole() { is(false)); assertNoAccessAllowed(ingestAdminRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(ingestAdminRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testKibanaSystemRole() { @@ -300,6 +311,13 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(kibanaRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); + // ILM + assertThat(kibanaRole.cluster().check(GetLifecycleAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(PutLifecycleAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(DeleteLifecycleAction.NAME, request, authentication), is(false)); + assertThat(kibanaRole.cluster().check(StartILMAction.NAME, request, authentication), is(false)); + assertThat(kibanaRole.cluster().check(StopILMAction.NAME, request, authentication), is(false)); + // SAML and token assertThat(kibanaRole.cluster().check(SamlPrepareAuthenticationAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(SamlAuthenticateAction.NAME, request, authentication), is(true)); @@ -390,6 +408,55 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(false)); assertNoAccessAllowed(kibanaRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(kibanaRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); + } + + public void testKibanaAdminRole() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = mock(Authentication.class); + + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_admin"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + assertThat(roleDescriptor.getMetadata(), not(hasEntry("_deprecated", true))); + + Role kibanaAdminRole = Role.builder(roleDescriptor, null).build(); + assertThat(kibanaAdminRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), + is(false)); + assertThat(kibanaAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); + 
assertThat(kibanaAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), + is(false)); + + assertThat(kibanaAdminRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); + + assertThat(kibanaAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test("foo"), is(false)); + assertThat(kibanaAdminRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); + assertThat( + kibanaAdminRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), + is(false)); + + final String randomApplication = "kibana-" + randomAlphaOfLengthBetween(8, 24); + assertThat(kibanaAdminRole.application().grants(new ApplicationPrivilege(randomApplication, "app-random", "all"), + "*"), is(false)); + + final String application = "kibana-.kibana"; + assertThat(kibanaAdminRole.application().grants(new ApplicationPrivilege(application, "app-foo", "foo"), "*"), + is(false)); + assertThat(kibanaAdminRole.application().grants(new ApplicationPrivilege(application, "app-all", "all"), "*"), + is(true)); + + final String applicationWithRandomIndex = "kibana-.kibana_" + randomAlphaOfLengthBetween(8, 24); + assertThat( + kibanaAdminRole.application() + .grants(new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), + is(false)); + + assertNoAccessAllowed(kibanaAdminRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testKibanaUserRole() { @@ -399,6 +466,7 @@ public void testKibanaUserRole() { RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + assertThat(roleDescriptor.getMetadata(), hasEntry("_deprecated", true)); Role kibanaUserRole = Role.builder(roleDescriptor, null).build(); assertThat(kibanaUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); @@ -429,6 +497,7 @@ public void testKibanaUserRole() { "*"), is(false)); assertNoAccessAllowed(kibanaUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(kibanaUserRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testMonitoringUserRole() { @@ -476,6 +545,7 @@ public void testMonitoringUserRole() { assertThat(monitoringUserRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(true)); assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); assertThat(monitoringUserRole.application().grants( @@ -550,6 +620,7 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(metricbeatIndex), is(false)); assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testRemoteMonitoringCollectorRole() { @@ -603,29 +674,50 @@ public void testRemoteMonitoringCollectorRole() { // (but ideally, the monitoring user should see all indices). 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetSettingsAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetSettingsAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesShardStoresAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpgradeStatusAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpgradeStatusAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(RecoveryAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(RecoveryAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesStatsAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesStatsAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesSegmentsAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesSegmentsAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME) .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME) + .test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), 
is(false)); assertMonitoringOnRestrictedIndices(remoteMonitoringAgentRole); assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } private void assertMonitoringOnRestrictedIndices(Role role) { @@ -644,11 +736,13 @@ private void assertMonitoringOnRestrictedIndices(Role role) { final List indexMonitoringActionNamesList = Arrays.asList(IndicesStatsAction.NAME, IndicesSegmentsAction.NAME, GetSettingsAction.NAME, IndicesShardStoresAction.NAME, UpgradeStatusAction.NAME, RecoveryAction.NAME); for (final String indexMonitoringActionName : indexMonitoringActionNamesList) { + String asyncSearchIndex = RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2); final Map authzMap = role.indices().authorize(indexMonitoringActionName, - Sets.newHashSet(internalSecurityIndex, RestrictedIndicesNames.SECURITY_MAIN_ALIAS), + Sets.newHashSet(internalSecurityIndex, RestrictedIndicesNames.SECURITY_MAIN_ALIAS, asyncSearchIndex), metaData.getAliasAndIndexLookup(), fieldPermissionsCache); assertThat(authzMap.get(internalSecurityIndex).isGranted(), is(true)); assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_MAIN_ALIAS).isGranted(), is(true)); + assertThat(authzMap.get(asyncSearchIndex).isGranted(), is(true)); } } @@ -692,6 +786,7 @@ public void testReportingUserRole() { assertThat(reportingUserRole.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); assertNoAccessAllowed(reportingUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(reportingUserRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testKibanaDashboardOnlyUserRole() { @@ -701,6 +796,7 @@ public void testKibanaDashboardOnlyUserRole() { RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_dashboard_only_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + assertThat(roleDescriptor.getMetadata(), hasEntry("_deprecated", true)); Role dashboardsOnlyUserRole = Role.builder(roleDescriptor, null).build(); assertThat(dashboardsOnlyUserRole.cluster().check(ClusterHealthAction.NAME, request, authentication), is(false)); @@ -728,6 +824,7 @@ public void testKibanaDashboardOnlyUserRole() { new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), is(false)); assertNoAccessAllowed(dashboardsOnlyUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(dashboardsOnlyUserRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testSuperuserRole() { @@ -828,6 +925,7 @@ public void testLogstashSystemRole() { is(false)); assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testBeatsAdminRole() { @@ -868,6 +966,7 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertNoAccessAllowed(beatsAdminRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(beatsAdminRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testBeatsSystemRole() { @@ -903,6 +1002,7 @@ public void testBeatsSystemRole() { 
assertThat(beatsSystemRole.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(true)); assertNoAccessAllowed(beatsSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(beatsSystemRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testAPMSystemRole() { @@ -943,7 +1043,7 @@ public void testAPMSystemRole() { "indices:data/write/index:op_type/" + randomAlphaOfLengthBetween(3,5)).test(index), is(false)); assertNoAccessAllowed(APMSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); - + assertNoAccessAllowed(APMSystemRole, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testAPMUserRole() { @@ -1037,6 +1137,7 @@ public void testMachineLearningAdminRole() { assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); assertThat(role.application().grants( @@ -1123,6 +1224,7 @@ public void testMachineLearningUserRole() { assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); @@ -1170,6 +1272,7 @@ public void testTransformAdminRole() { assertNoAccessAllowed(role, TransformInternalIndexConstants.LATEST_INDEX_NAME); // internal use only assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); assertThat(role.application().grants( @@ -1222,6 +1325,7 @@ public void testDataFrameTransformsUserRole() { assertNoAccessAllowed(role, TransformInternalIndexConstants.LATEST_INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); assertThat(role.application().grants( @@ -1272,6 +1376,7 @@ public void testWatcherAdminRole() { } assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } public void testWatcherUserRole() { @@ -1305,6 +1410,7 @@ public void testWatcherUserRole() { } assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)); } private void assertReadWriteDocsButNotDeleteIndexAllowed(Role role, String index) { @@ -1329,6 +1435,7 @@ private void assertOnlyReadAllowed(Role role, String index) { assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + assertNoAccessAllowed(role, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX 
+ randomAlphaOfLengthBetween(0, 2)); } private void assertNoAccessAllowed(Role role, Collection indices) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java new file mode 100644 index 0000000000000..078a17a7aa555 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.oss; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class IndexFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected Reader instanceReader() { + return IndexFeatureSetUsage::new; + } + + @Override + protected IndexFeatureSetUsage createTestInstance() { + Set fields = new HashSet<>(); + if (randomBoolean()) { + fields.add("keyword"); + } + if (randomBoolean()) { + fields.add("integer"); + } + + Set charFilters = new HashSet<>(); + if (randomBoolean()) { + charFilters.add("pattern_replace"); + } + + Set tokenizers = new HashSet<>(); + if (randomBoolean()) { + tokenizers.add("whitespace"); + } + + Set tokenFilters = new HashSet<>(); + if (randomBoolean()) { + tokenFilters.add("stop"); + } + + Set analyzers = new HashSet<>(); + if (randomBoolean()) { + analyzers.add("english"); + } + + Set builtInCharFilters = new HashSet<>(); + if (randomBoolean()) { + builtInCharFilters.add("html_strip"); + } + + Set builtInTokenizers = new HashSet<>(); + if (randomBoolean()) { + builtInTokenizers.add("keyword"); + } + + Set builtInTokenFilters = new HashSet<>(); + if (randomBoolean()) { + builtInTokenFilters.add("trim"); + } + + Set builtInAnalyzers = new HashSet<>(); + if (randomBoolean()) { + builtInAnalyzers.add("french"); + } + + return new IndexFeatureSetUsage(fields, + charFilters, tokenizers, tokenFilters, analyzers, + builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers); + } + + @Override + protected IndexFeatureSetUsage mutateInstance(IndexFeatureSetUsage instance) throws IOException { + switch (randomInt(8)) { + case 0: + Set fields = new HashSet<>(instance.getUsedFieldTypes()); + if (fields.add("keyword") == false) { + fields.remove("keyword"); + } + return new IndexFeatureSetUsage(fields, instance.getUsedCharFilterTypes(), instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 1: + Set charFilters = new HashSet<>(instance.getUsedCharFilterTypes()); + if (charFilters.add("pattern_replace") == false) { + charFilters.remove("pattern_replace"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), charFilters, instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 2: + Set
tokenizers = new HashSet<>(instance.getUsedTokenizerTypes()); + if (tokenizers.add("whitespace") == false) { + tokenizers.remove("whitespace"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), tokenizers, + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 3: + Set tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes()); + if (tokenFilters.add("stop") == false) { + tokenFilters.remove("stop"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 4: + Set analyzers = new HashSet<>(instance.getUsedAnalyzerTypes()); + if (analyzers.add("english") == false) { + analyzers.remove("english"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers, + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 5: + Set builtInCharFilters = new HashSet<>(); + if (builtInCharFilters.add("html_strip") == false) { + builtInCharFilters.remove("html_strip"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters, + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 6: + Set builtInTokenizers = new HashSet<>(); + if (builtInTokenizers.add("keyword") == false) { + builtInTokenizers.remove("keyword"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 7: + Set builtInTokenFilters = new HashSet<>(); + if (builtInTokenFilters.add("trim") == false) { + builtInTokenFilters.remove("trim"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters, + instance.getUsedBuiltInAnalyzers()); + case 8: + Set builtInAnalyzers = new HashSet<>(); + if (builtInAnalyzers.add("french") == false) { + builtInAnalyzers.remove("french"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + builtInAnalyzers); + default: + throw new AssertionError(); + } + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java new file mode 100644 index 0000000000000..605e8cc0e1534 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.oss; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class IndexUsageTransportActionTests extends ESTestCase { + + private static void collectTypes(Map mapping, Set types) { + IndexUsageTransportAction.visitMapping(mapping, + m -> { + if (m.containsKey("type")) { + types.add(m.get("type").toString()); + } else { + types.add("object"); + } + }); + } + + public void testCountTopLevelFields() { + Map mapping = new HashMap<>(); + Set fields = new HashSet<>(); + collectTypes(mapping, fields); + assertEquals(Collections.emptySet(), fields); + + Map properties = new HashMap<>(); + mapping.put("properties", properties); + + Map keywordField = new HashMap<>(); + keywordField.put("type", "keyword"); + properties.put("foo", keywordField); + collectTypes(mapping, fields); + assertEquals(Collections.singleton("keyword"), fields); + + Map IndexField = new HashMap<>(); + IndexField.put("type", "integer"); + properties.put("bar", IndexField); + fields = new HashSet<>(); + collectTypes(mapping, fields); + assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields); + + properties.put("baz", IndexField); + fields = new HashSet<>(); + collectTypes(mapping, fields); + assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields); + } + + public void testCountMultiFields() { + Map keywordField = new HashMap<>(); + keywordField.put("type", "keyword"); + + Map textField = new HashMap<>(); + textField.put("type", "text"); + + Map fields = new HashMap<>(); + fields.put("keyword", keywordField); + textField.put("fields", fields); + + Map properties = new HashMap<>(); + properties.put("foo", textField); + + Map mapping = new HashMap<>(); + mapping.put("properties", properties); + + Set usedFields = new HashSet<>(); + collectTypes(mapping, usedFields); + assertEquals(new HashSet<>(Arrays.asList("keyword", "text")), usedFields); + } + + public void testCountInnerFields() { + Map keywordField = new HashMap<>(); + keywordField.put("type", "keyword"); + + Map properties = new HashMap<>(); + properties.put("foo", keywordField); + + Map objectMapping = new HashMap<>(); + objectMapping.put("properties", properties); + + Map mapping = new HashMap<>(); + + properties = new HashMap<>(); + properties.put("obj", objectMapping); + mapping.put("properties", properties); + Set fields = new HashSet<>(); + collectTypes(mapping, fields); + assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields); + + properties.put("bar", keywordField); + fields = new HashSet<>(); + collectTypes(mapping, fields); + assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields); + } + +} diff --git 
a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java index 9ab357f23499e..39c331008ff46 100644 --- a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java +++ b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java @@ -34,11 +34,10 @@ public abstract class CommonEnrichRestTestCase extends ESRestTestCase { public void deletePolicies() throws Exception { Map responseMap = toMap(adminClient().performRequest(new Request("GET", "/_enrich/policy"))); @SuppressWarnings("unchecked") - List> policies = (List>) responseMap.get("policies"); + List> policies = (List>) responseMap.get("policies"); - for (Map entry: policies) { - client().performRequest(new Request("DELETE", "/_enrich/policy/" + - XContentMapValues.extractValue("config.match.name", entry))); + for (Map entry : policies) { + client().performRequest(new Request("DELETE", "/_enrich/policy/" + XContentMapValues.extractValue("config.match.name", entry))); List sourceIndices = (List) XContentMapValues.extractValue("config.match.indices", entry); for (Object sourceIndex : sourceIndices) { @@ -72,9 +71,9 @@ private void setupGenericLifecycleTest(boolean deletePipeilne) throws Exception // Create pipeline Request putPipelineRequest = new Request("PUT", "/_ingest/pipeline/my_pipeline"); - putPipelineRequest.setJsonEntity("{\"processors\":[" + - "{\"enrich\":{\"policy_name\":\"my_policy\",\"field\":\"host\",\"target_field\":\"entry\"}}" + - "]}"); + putPipelineRequest.setJsonEntity( + "{\"processors\":[" + "{\"enrich\":{\"policy_name\":\"my_policy\",\"field\":\"host\",\"target_field\":\"entry\"}}" + "]}" + ); assertOK(client().performRequest(putPipelineRequest)); // Index document using pipeline with enrich processor: @@ -120,8 +119,10 @@ public void testDeleteIsCaseSensitive() throws Exception { putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); assertOK(client().performRequest(putPolicyRequest)); - ResponseException exc = expectThrows(ResponseException.class, - () -> client().performRequest(new Request("DELETE", "/_enrich/policy/MY_POLICY"))); + ResponseException exc = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("DELETE", "/_enrich/policy/MY_POLICY")) + ); assertTrue(exc.getMessage().contains("policy [MY_POLICY] not found")); } @@ -130,15 +131,19 @@ public void testDeleteExistingPipeline() throws Exception { setupGenericLifecycleTest(false); Request putPipelineRequest = new Request("PUT", "/_ingest/pipeline/another_pipeline"); - putPipelineRequest.setJsonEntity("{\"processors\":[" + - "{\"enrich\":{\"policy_name\":\"my_policy\",\"field\":\"host\",\"target_field\":\"entry\"}}" + - "]}"); + putPipelineRequest.setJsonEntity( + "{\"processors\":[" + "{\"enrich\":{\"policy_name\":\"my_policy\",\"field\":\"host\",\"target_field\":\"entry\"}}" + "]}" + ); assertOK(client().performRequest(putPipelineRequest)); - ResponseException exc = expectThrows(ResponseException.class, - () -> client().performRequest(new Request("DELETE", "/_enrich/policy/my_policy"))); - assertTrue(exc.getMessage().contains("Could not delete policy [my_policy] because" + - " a pipeline is referencing it [my_pipeline, another_pipeline]")); + ResponseException exc = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("DELETE", 
"/_enrich/policy/my_policy")) + ); + assertTrue( + exc.getMessage() + .contains("Could not delete policy [my_policy] because" + " a pipeline is referencing it [my_pipeline, another_pipeline]") + ); // delete the pipelines so the policies can be deleted client().performRequest(new Request("DELETE", "/_ingest/pipeline/my_pipeline")); @@ -157,7 +162,7 @@ public static String generatePolicySource(String index) throws IOException { source.field("query", QueryBuilders.matchAllQuery()); } source.field("match_field", "host"); - source.field("enrich_fields", new String[] {"globalRank", "tldRank", "tld"}); + source.field("enrich_fields", new String[] { "globalRank", "tldRank", "tld" }); } source.endObject().endObject(); return Strings.toString(source); @@ -169,12 +174,12 @@ public static void createSourceIndex(String index) throws IOException { } public static String createSourceIndexMapping() { - return "\"properties\":" + - "{\"host\": {\"type\":\"keyword\"}," + - "\"globalRank\":{\"type\":\"keyword\"}," + - "\"tldRank\":{\"type\":\"keyword\"}," + - "\"tld\":{\"type\":\"keyword\"}" + - "}"; + return "\"properties\":" + + "{\"host\": {\"type\":\"keyword\"}," + + "\"globalRank\":{\"type\":\"keyword\"}," + + "\"tldRank\":{\"type\":\"keyword\"}," + + "\"tld\":{\"type\":\"keyword\"}" + + "}"; } private static Map toMap(Response response) throws IOException { @@ -204,11 +209,15 @@ private static void verifyEnrichMonitoring() throws IOException { for (int i = 0; i < hits.size(); i++) { Map hit = (Map) hits.get(i); - int foundRemoteRequestsTotal = - (int) XContentMapValues.extractValue("_source.enrich_coordinator_stats.remote_requests_total", hit); + int foundRemoteRequestsTotal = (int) XContentMapValues.extractValue( + "_source.enrich_coordinator_stats.remote_requests_total", + hit + ); maxRemoteRequestsTotal = Math.max(maxRemoteRequestsTotal, foundRemoteRequestsTotal); - int foundExecutedSearchesTotal = - (int) XContentMapValues.extractValue("_source.enrich_coordinator_stats.executed_searches_total", hit); + int foundExecutedSearchesTotal = (int) XContentMapValues.extractValue( + "_source.enrich_coordinator_stats.executed_searches_total", + hit + ); maxExecutedSearchesTotal = Math.max(maxExecutedSearchesTotal, foundExecutedSearchesTotal); } diff --git a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityFailureIT.java b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityFailureIT.java index 4129dcbf920f1..483e5c7056fa2 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityFailureIT.java +++ b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityFailureIT.java @@ -20,17 +20,13 @@ public class EnrichSecurityFailureIT extends ESRestTestCase { @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("test_enrich_no_privs", new SecureString("x-pack-test-password".toCharArray())); - return Settings.builder() - .put(ThreadContext.PREFIX + ".Authorization", token) - .build(); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } @Override protected Settings restAdminSettings() { String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); - return Settings.builder() - .put(ThreadContext.PREFIX + ".Authorization", token) - .build(); + return Settings.builder().put(ThreadContext.PREFIX + 
".Authorization", token).build(); } public void testFailure() throws Exception { diff --git a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java index 7ea64a121c32b..76afb1173224f 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java +++ b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java @@ -20,17 +20,13 @@ public class EnrichSecurityIT extends CommonEnrichRestTestCase { @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("test_enrich", new SecureString("x-pack-test-password".toCharArray())); - return Settings.builder() - .put(ThreadContext.PREFIX + ".Authorization", token) - .build(); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } @Override protected Settings restAdminSettings() { String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); - return Settings.builder() - .put(ThreadContext.PREFIX + ".Authorization", token) - .build(); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } public void testInsufficientPermissionsOnNonExistentIndex() throws Exception { @@ -42,7 +38,9 @@ public void testInsufficientPermissionsOnNonExistentIndex() throws Exception { Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("some-other-index")); ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(putPolicyRequest)); - assertThat(exc.getMessage(), - containsString("unable to store policy because no indices match with the specified index patterns [some-other-index]")); + assertThat( + exc.getMessage(), + containsString("unable to store policy because no indices match with the specified index patterns [some-other-index]") + ); } } diff --git a/x-pack/plugin/enrich/qa/rest/src/test/java/org/elasticsearch/xpack/enrich/EnrichIT.java b/x-pack/plugin/enrich/qa/rest/src/test/java/org/elasticsearch/xpack/enrich/EnrichIT.java index ccc3386ee42c2..a0b44f227cbcd 100644 --- a/x-pack/plugin/enrich/qa/rest/src/test/java/org/elasticsearch/xpack/enrich/EnrichIT.java +++ b/x-pack/plugin/enrich/qa/rest/src/test/java/org/elasticsearch/xpack/enrich/EnrichIT.java @@ -7,5 +7,4 @@ import org.elasticsearch.test.enrich.CommonEnrichRestTestCase; -public class EnrichIT extends CommonEnrichRestTestCase { -} +public class EnrichIT extends CommonEnrichRestTestCase {} diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/GeoMatchProcessor.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/GeoMatchProcessor.java index b10bafa5e959c..62f8c16dd0d0b 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/GeoMatchProcessor.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/GeoMatchProcessor.java @@ -59,12 +59,11 @@ public final class GeoMatchProcessor extends AbstractEnrichProcessor { this.shapeRelation = shapeRelation; } - @SuppressWarnings("unchecked") @Override public QueryBuilder getQueryBuilder(Object fieldValue) { List points = new ArrayList<>(); if (fieldValue instanceof List) { - List values = (List) fieldValue; + List values = (List) fieldValue; if (values.size() == 2 
&& values.get(0) instanceof Number) { GeoPoint geoPoint = GeoUtils.parseGeoPoint(values, true); points.add(new Point(geoPoint.lon(), geoPoint.lat())); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/MatchProcessor.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/MatchProcessor.java index 6e2967272fa26..317e470b764d9 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/MatchProcessor.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/MatchProcessor.java @@ -16,7 +16,7 @@ import java.util.List; import java.util.function.BiConsumer; -public class MatchProcessor extends AbstractEnrichProcessor { +public final class MatchProcessor extends AbstractEnrichProcessor { MatchProcessor( String tag, @@ -50,7 +50,7 @@ public class MatchProcessor extends AbstractEnrichProcessor { @Override public QueryBuilder getQueryBuilder(Object fieldValue) { if (fieldValue instanceof List) { - return new TermsQueryBuilder(matchField, (List) fieldValue); + return new TermsQueryBuilder(matchField, (List) fieldValue); } else { return new TermQueryBuilder(matchField, fieldValue); } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java index 080a119d6564f..f288d5e71b80a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java @@ -29,8 +29,9 @@ import java.io.IOException; -public class TransportExecuteEnrichPolicyAction extends - TransportMasterNodeAction { +public class TransportExecuteEnrichPolicyAction extends TransportMasterNodeAction< + ExecuteEnrichPolicyAction.Request, + ExecuteEnrichPolicyAction.Response> { private final EnrichPolicyExecutor executor; diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyAction.java index 46fe85b7307c6..59f3f20ee5bab 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyAction.java @@ -26,8 +26,9 @@ import java.util.HashMap; import java.util.Map; -public class TransportGetEnrichPolicyAction extends - TransportMasterNodeReadAction { +public class TransportGetEnrichPolicyAction extends TransportMasterNodeReadAction< + GetEnrichPolicyAction.Request, + GetEnrichPolicyAction.Response> { @Inject public TransportGetEnrichPolicyAction( diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java index 6de7952e4d84f..a877159b079cd 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java @@ -57,11 +57,7 @@ public void testRecoverExistingReplica() throws Exception { indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, 
randomIntBetween(0, 50)) .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); ensureGreen(indexName); - if (randomBoolean()) { - client().admin().indices().prepareFlush(indexName).get(); - } else { - client().admin().indices().prepareSyncedFlush(indexName).get(); - } + client().admin().indices().prepareFlush(indexName).get(); // index more documents while one shard copy is offline internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { @Override diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java index 4dfdc507350b5..3933a1cd70365 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.concurrent.TimeUnit; import static java.util.Collections.singletonMap; @@ -742,8 +743,10 @@ private static Object getIndexSetting(RestClient client, String index, String se Request request = new Request("GET", "/" + index + "/_settings"); request.addParameter("flat_settings", "true"); Map response = toMap(client.performRequest(request)); - Map settings = (Map) ((Map) response.get(index)).get("settings"); - return settings.get(setting); + return Optional.ofNullable((Map) response.get(index)) + .map(m -> (Map) m.get("settings")) + .map(m -> m.get(setting)) + .orElse(null); } private void assertDocumentExists(RestClient client, String index, String id) throws IOException { diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index ee94b88345a74..0455b92edd7c6 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -40,12 +40,12 @@ import org.elasticsearch.xpack.core.ilm.SetPriorityAction; import org.elasticsearch.xpack.core.ilm.ShrinkAction; import org.elasticsearch.xpack.core.ilm.ShrinkStep; -import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction; import org.elasticsearch.xpack.core.ilm.Step; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.elasticsearch.xpack.core.ilm.TerminalPolicyStep; import org.elasticsearch.xpack.core.ilm.UpdateRolloverLifecycleDateStep; import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep; +import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction; import org.hamcrest.Matchers; import org.junit.Before; @@ -324,50 +324,62 @@ public void testAllocateActionOnlyReplicas() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/50781") public void testWaitForSnapshot() throws Exception { createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); - createNewSingletonPolicy("delete", new WaitForSnapshotAction("slm")); + String smlPolicy = randomAlphaOfLengthBetween(4, 10); + createNewSingletonPolicy("delete", new 
WaitForSnapshotAction(smlPolicy)); updatePolicy(index, policy); assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("wait_for_snapshot"))); - assertBusy(() -> assertThat(getStepKeyForIndex(index).getName(), equalTo("wait-for-snapshot"))); assertBusy(() -> assertThat(getFailedStepForIndex(index), equalTo("wait-for-snapshot"))); - createSnapshotRepo(); - createSlmPolicy(); + String repo = createSnapshotRepo(); + createSlmPolicy(smlPolicy, repo); assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("wait_for_snapshot"))); - Request request = new Request("PUT", "/_slm/policy/slm/_execute"); + Request request = new Request("PUT", "/_slm/policy/" + smlPolicy + "/_execute"); assertOK(client().performRequest(request)); - assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("completed"))); - } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/50781") + assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("completed")), 2, TimeUnit.MINUTES); + + request = new Request("DELETE", "/_slm/policy/" + smlPolicy); + assertOK(client().performRequest(request)); + + request = new Request("DELETE", "/_snapshot/" + repo); + assertOK(client().performRequest(request)); + } + public void testWaitForSnapshotSlmExecutedBefore() throws Exception { createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); - createNewSingletonPolicy("delete", new WaitForSnapshotAction("slm")); + String smlPolicy = randomAlphaOfLengthBetween(4, 10); + createNewSingletonPolicy("delete", new WaitForSnapshotAction(smlPolicy)); - createSnapshotRepo(); - createSlmPolicy(); + String repo = createSnapshotRepo(); + createSlmPolicy(smlPolicy, repo); - Request request = new Request("PUT", "/_slm/policy/slm/_execute"); + Request request = new Request("PUT", "/_slm/policy/" + smlPolicy + "/_execute"); assertOK(client().performRequest(request)); updatePolicy(index, policy); assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("wait_for_snapshot"))); assertBusy(() -> assertThat(getStepKeyForIndex(index).getName(), equalTo("wait-for-snapshot"))); - request = new Request("PUT", "/_slm/policy/slm/_execute"); + request = new Request("PUT", "/_slm/policy/" + smlPolicy + "/_execute"); + assertOK(client().performRequest(request)); + + request = new Request("PUT", "/_slm/policy/" + smlPolicy + "/_execute"); assertOK(client().performRequest(request)); - request = new Request("PUT", "/_slm/policy/slm/_execute"); + assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("completed")), 2, TimeUnit.MINUTES); + + request = new Request("DELETE", "/_slm/policy/" + smlPolicy); assertOK(client().performRequest(request)); - assertBusy(() -> assertThat(getStepKeyForIndex(index).getAction(), equalTo("completed"))); + request = new Request("DELETE", "/_snapshot/" + repo); + assertOK(client().performRequest(request)); } public void testDelete() throws Exception { @@ -1342,6 +1354,41 @@ public void testRetryableInitializationStep() throws Exception { }); } + public void testRefreshablePhaseJson() throws Exception { + String index = "refresh-index"; + + createNewSingletonPolicy("hot", new RolloverAction(null, null, 100L)); + Request createIndexTemplate = new Request("PUT", "_template/rolling_indexes"); + createIndexTemplate.setJsonEntity("{" + + "\"index_patterns\": [\""+ index + "-*\"], \n" + + " \"settings\": {\n" + + " 
\"number_of_shards\": 1,\n" + + " \"number_of_replicas\": 0,\n" + + " \"index.lifecycle.name\": \"" + policy+ "\",\n" + + " \"index.lifecycle.rollover_alias\": \"alias\"\n" + + " }\n" + + "}"); + client().performRequest(createIndexTemplate); + + createIndexWithSettings(index + "-1", + Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0), + true); + + // Index a document + index(client(), index + "-1", "1", "foo", "bar"); + + // Wait for the index to enter the check-rollover-ready step + assertBusy(() -> assertThat(getStepKeyForIndex(index + "-1").getName(), equalTo(WaitForRolloverReadyStep.NAME))); + + // Update the policy to allow rollover at 1 document instead of 100 + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + + // Index should now have been able to roll over, creating the new index and proceeding to the "complete" step + assertBusy(() -> assertThat(indexExists(index + "-000002"), is(true))); + assertBusy(() -> assertThat(getStepKeyForIndex(index + "-1").getName(), equalTo(TerminalPolicyStep.KEY.getName()))); + } + // This method should be called inside an assertBusy, it has no retry logic of its own private void assertHistoryIsPresent(String policyName, String indexName, boolean success, String stepName) throws IOException { assertHistoryIsPresent(policyName, indexName, success, null, null, stepName); @@ -1593,24 +1640,26 @@ private String getSnapshotState(String snapshot) throws IOException { return (String) snapResponse.get("state"); } - private void createSlmPolicy() throws IOException { + private void createSlmPolicy(String smlPolicy, String repo) throws IOException { Request request; - request = new Request("PUT", "/_slm/policy/slm"); + request = new Request("PUT", "/_slm/policy/" + smlPolicy); request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() .startObject() .field("schedule", "59 59 23 31 12 ? 
2099") - .field("repository", "repo") + .field("repository", repo) .field("name", "snap" + randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT)) .startObject("config") + .field("include_global_state", false) .endObject() .endObject())); assertOK(client().performRequest(request)); } - private void createSnapshotRepo() throws IOException { - Request request = new Request("PUT", "/_snapshot/repo"); + private String createSnapshotRepo() throws IOException { + String repo = randomAlphaOfLengthBetween(4, 10); + Request request = new Request("PUT", "/_snapshot/" + repo); request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() .startObject() @@ -1622,5 +1671,6 @@ private void createSnapshotRepo() throws IOException { .endObject() .endObject())); assertOK(client().performRequest(request)); + return repo; } } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index 07628b54c960e..b3c114b5e67fb 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -82,11 +82,10 @@ public void testMissingRepo() throws Exception { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/50358") public void testFullPolicySnapshot() throws Exception { final String indexName = "test"; final String policyName = "test-policy"; - final String repoId = "my-repo"; + final String repoId = "full-policy-repo"; int docCount = randomIntBetween(10, 50); List indexReqs = new ArrayList<>(); for (int i = 0; i < docCount; i++) { @@ -116,9 +115,12 @@ public void testFullPolicySnapshot() throws Exception { Map metadata = (Map) snapResponse.get(0).get("metadata"); assertNotNull(metadata); assertThat(metadata.get("policy"), equalTo(policyName)); - assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); + }); + + assertBusy(() -> assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION)); - // Check that the last success date was written to the cluster state + // Check that the last success date was written to the cluster state + assertBusy(() -> { Request getReq = new Request("GET", "/_slm/policy/" + policyName); Response policyMetadata = client().performRequest(getReq); Map policyResponseMap; @@ -136,9 +138,10 @@ public void testFullPolicySnapshot() throws Exception { String lastSnapshotName = (String) lastSuccessObject.get("snapshot_name"); assertThat(lastSnapshotName, startsWith("snap-")); + }); - assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); - + // Check that the stats are written + assertBusy(() -> { Map stats = getSLMStats(); Map policyStats = policyStatsAsMap(stats); Map policyIdStats = (Map) policyStats.get(policyName); @@ -155,7 +158,7 @@ public void testFullPolicySnapshot() throws Exception { @SuppressWarnings("unchecked") public void testPolicyFailure() throws Exception { final String policyName = "test-policy"; - final String repoName = "test-repo"; + final String repoName = "policy-failure-repo"; final String indexPattern = "index-doesnt-exist"; initializeRepo(repoName); @@ -204,7 +207,7 @@ public void testPolicyFailure() throws Exception { public void testPolicyManualExecution() throws Exception { final String indexName = "test"; final String policyName = "test-policy"; - final 
String repoId = "my-repo"; + final String repoId = "manual-execution-repo"; int docCount = randomIntBetween(10, 50); for (int i = 0; i < docCount; i++) { index(client(), indexName, "" + i, "foo", "bar"); @@ -348,10 +351,10 @@ public void testStartStopStatus() throws Exception { @SuppressWarnings("unchecked") @TestIssueLogging(value = "org.elasticsearch.xpack.slm:TRACE,org.elasticsearch.xpack.core.slm:TRACE,org.elasticsearch.snapshots:TRACE", issueUrl = "https://github.com/elastic/elasticsearch/issues/48017") - public void testBasicTimeBasedRetenion() throws Exception { + public void testBasicTimeBasedRetention() throws Exception { final String indexName = "test"; final String policyName = "test-policy"; - final String repoId = "my-repo"; + final String repoId = "time-based-retention-repo"; int docCount = randomIntBetween(10, 50); List indexReqs = new ArrayList<>(); for (int i = 0; i < docCount; i++) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java index 816186882927c..0f6696b0be537 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java @@ -251,8 +251,8 @@ private static LifecycleExecutionState updateExecutionStateToStep(LifecyclePolic /** * Given a cluster state and lifecycle state, return a new state using the new lifecycle state for the given index. */ - private static ClusterState.Builder newClusterStateWithLifecycleState(Index index, ClusterState clusterState, - LifecycleExecutionState lifecycleState) { + public static ClusterState.Builder newClusterStateWithLifecycleState(Index index, ClusterState clusterState, + LifecycleExecutionState lifecycleState) { ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); newClusterStateBuilder.metaData(MetaData.builder(clusterState.getMetaData()) .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java index e5139c1b02f2a..a05768cc72a32 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java @@ -8,35 +8,55 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ilm.ErrorStep; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.ilm.LifecycleExecutionState; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.ilm.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.ilm.Step; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction.Request; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction.Response; +import org.elasticsearch.xpack.ilm.IndexLifecycleTransition; import java.io.IOException; import java.time.Instant; +import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedMap; +import java.util.Spliterators; import java.util.TreeMap; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; /** * This class is responsible for bootstrapping {@link IndexLifecycleMetadata} into the cluster-state, as well @@ -45,12 +65,17 @@ public class TransportPutLifecycleAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutLifecycleAction.class); + private final NamedXContentRegistry xContentRegistry; + private final Client client; @Inject public TransportPutLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + NamedXContentRegistry namedXContentRegistry, Client client) { super(PutLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.xContentRegistry = namedXContentRegistry; + this.client = client; } @Override @@ -82,7 +107,7 @@ protected Response newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState.Builder newState = ClusterState.builder(currentState); + ClusterState.Builder stateBuilder = ClusterState.builder(currentState); IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); if (currentMetadata == null) { // first time using index-lifecycle feature, bootstrap metadata currentMetadata = IndexLifecycleMetadata.EMPTY; @@ -100,13 +125,195 @@ public ClusterState execute(ClusterState currentState) throws Exception { logger.info("updating index lifecycle policy [{}]", request.getPolicy().getName()); } IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentMetadata.getOperationMode()); - newState.metaData(MetaData.builder(currentState.getMetaData()) + 
stateBuilder.metaData(MetaData.builder(currentState.getMetaData()) .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); - return newState.build(); + ClusterState nonRefreshedState = stateBuilder.build(); + if (oldPolicy == null) { + return nonRefreshedState; + } else { + try { + return updateIndicesForPolicy(nonRefreshedState, xContentRegistry, client, + oldPolicy.getPolicy(), lifecyclePolicyMetadata); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("unable to refresh indices phase JSON for updated policy [{}]", + oldPolicy.getName()), e); + // Revert to the non-refreshed state + return nonRefreshedState; + } + } } }); } + /** + * Ensure that we have the minimum amount of metadata necessary to check for cache phase + * refresh. This includes: + * - An execution state + * - Existing phase definition JSON + * - A current step key + * - A current phase in the step key + * - Not currently in the ERROR step + */ + static boolean eligibleToCheckForRefresh(final IndexMetaData metaData) { + LifecycleExecutionState executionState = LifecycleExecutionState.fromIndexMetadata(metaData); + if (executionState == null || executionState.getPhaseDefinition() == null) { + return false; + } + + Step.StepKey currentStepKey = LifecycleExecutionState.getCurrentStepKey(executionState); + if (currentStepKey == null || currentStepKey.getPhase() == null) { + return false; + } + + return ErrorStep.NAME.equals(currentStepKey.getName()) == false; + } + + /** + * Parse the {@code phaseDef} phase definition to get the stepkeys for the given phase. + * If there is an error parsing or if the phase definition is missing the required + * information, returns null. + */ + @Nullable + static Set readStepKeys(final NamedXContentRegistry xContentRegistry, final Client client, + final String phaseDef, final String currentPhase) { + final PhaseExecutionInfo phaseExecutionInfo; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(xContentRegistry, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, phaseDef)) { + phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase); + } catch (Exception e) { + logger.trace(new ParameterizedMessage("exception reading step keys checking for refreshability, phase definition: {}", + phaseDef), e); + return null; + } + + if (phaseExecutionInfo == null || phaseExecutionInfo.getPhase() == null) { + return null; + } + + return phaseExecutionInfo.getPhase().getActions().values().stream() + .flatMap(a -> a.toSteps(client, phaseExecutionInfo.getPhase().getName(), null).stream()) + .map(Step::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + /** + * Returns 'true' if the index's cached phase JSON can be safely reread, 'false' otherwise. 
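// Illustrative sketch only (cachedPhaseJson, updatedPhaseJson and the "hot" phase name are
// hypothetical, not part of this change): the refresh decision reduces to comparing the step
// keys derived from the cached phase definition with the step keys the updated policy would
// produce for the same phase; if they match, the cached JSON can be swapped without altering
// the index's execution flow.
Set<Step.StepKey> cachedKeys = readStepKeys(xContentRegistry, client, cachedPhaseJson, "hot");
Set<Step.StepKey> updatedKeys = readStepKeys(xContentRegistry, client, updatedPhaseJson, "hot");
boolean refreshable = cachedKeys != null && cachedKeys.equals(updatedKeys);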
+ */ + static boolean isIndexPhaseDefinitionUpdatable(final NamedXContentRegistry xContentRegistry, final Client client, + final IndexMetaData metaData, final LifecyclePolicy newPolicy) { + final String index = metaData.getIndex().getName(); + if (eligibleToCheckForRefresh(metaData) == false) { + logger.debug("[{}] does not contain enough information to check for eligibility of refreshing phase", index); + return false; + } + final String policyId = newPolicy.getName(); + + final LifecycleExecutionState executionState = LifecycleExecutionState.fromIndexMetadata(metaData); + final Step.StepKey currentStepKey = LifecycleExecutionState.getCurrentStepKey(executionState); + final String currentPhase = currentStepKey.getPhase(); + + final Set newStepKeys = newPolicy.toSteps(client).stream() + .map(Step::getKey) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + if (newStepKeys.contains(currentStepKey) == false) { + // The index is on a step that doesn't exist in the new policy, we + // can't safely re-read the JSON + logger.debug("[{}] updated policy [{}] does not contain the current step key [{}], so the policy phase will not be refreshed", + index, policyId, currentStepKey); + return false; + } + + final String phaseDef = executionState.getPhaseDefinition(); + final Set oldStepKeys = readStepKeys(xContentRegistry, client, phaseDef, currentPhase); + if (oldStepKeys == null) { + logger.debug("[{}] unable to parse phase definition for cached policy [{}], policy phase will not be refreshed", + index, policyId); + return false; + } + + final Set oldPhaseStepKeys = oldStepKeys.stream() + .filter(sk -> currentPhase.equals(sk.getPhase())) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + final PhaseExecutionInfo phaseExecutionInfo = new PhaseExecutionInfo(policyId, newPolicy.getPhases().get(currentPhase), 1L, 1L); + final String peiJson = Strings.toString(phaseExecutionInfo); + + final Set newPhaseStepKeys = readStepKeys(xContentRegistry, client, peiJson, currentPhase); + if (newPhaseStepKeys == null) { + logger.debug(new ParameterizedMessage("[{}] unable to parse phase definition for policy [{}] " + + "to determine if it could be refreshed", index, policyId)); + return false; + } + + if (newPhaseStepKeys.equals(oldPhaseStepKeys)) { + // The new and old phase have the same stepkeys for this current phase, so we can + // refresh the definition because we know it won't change the execution flow. + logger.debug("[{}] updated policy [{}] contains the same phase step keys and can be refreshed", index, policyId); + return true; + } else { + logger.debug("[{}] updated policy [{}] has different phase step keys and will NOT refresh phase " + + "definition as it differs too greatly. old: {}, new: {}", + index, policyId, oldPhaseStepKeys, newPhaseStepKeys); + return false; + } + } + + /** + * Rereads the phase JSON for the given index, returning a new cluster state. 
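// A minimal sketch of the refresh itself (updatedPolicy is a hypothetical LifecyclePolicyMetadata
// and "hot" a hypothetical current phase): the new cached phase_definition is simply the updated
// policy's phase for the index's current phase, serialized as a PhaseExecutionInfo that carries
// the new policy version and modified date.
PhaseExecutionInfo info = new PhaseExecutionInfo(
    updatedPolicy.getName(),
    updatedPolicy.getPolicy().getPhases().get("hot"),
    updatedPolicy.getVersion(),
    updatedPolicy.getModifiedDate());
String refreshedPhaseJson = Strings.toString(info, false, false);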
+ */ + static ClusterState refreshPhaseDefinition(final ClusterState state, final String index, final LifecyclePolicyMetadata updatedPolicy) { + final IndexMetaData idxMeta = state.metaData().index(index); + assert eligibleToCheckForRefresh(idxMeta) : "index " + index + " is missing crucial information needed to refresh phase definition"; + + logger.trace("[{}] updating cached phase definition for policy [{}]", index, updatedPolicy.getName()); + LifecycleExecutionState currentExState = LifecycleExecutionState.fromIndexMetadata(idxMeta); + + String currentPhase = currentExState.getPhase(); + PhaseExecutionInfo pei = new PhaseExecutionInfo(updatedPolicy.getName(), + updatedPolicy.getPolicy().getPhases().get(currentPhase), updatedPolicy.getVersion(), updatedPolicy.getModifiedDate()); + + LifecycleExecutionState newExState = LifecycleExecutionState.builder(currentExState) + .setPhaseDefinition(Strings.toString(pei, false, false)) + .build(); + + return IndexLifecycleTransition.newClusterStateWithLifecycleState(idxMeta.getIndex(), state, newExState).build(); + } + + /** + * For the given new policy, returns a new cluster with all updateable indices' phase JSON refreshed. + */ + static ClusterState updateIndicesForPolicy(final ClusterState state, final NamedXContentRegistry xContentRegistry, final Client client, + final LifecyclePolicy oldPolicy, final LifecyclePolicyMetadata newPolicy) { + assert oldPolicy.getName().equals(newPolicy.getName()) : "expected both policies to have the same id but they were: [" + + oldPolicy.getName() + "] vs. [" + newPolicy.getName() + "]"; + + // No need to update anything if the policies are identical in contents + if (oldPolicy.equals(newPolicy.getPolicy())) { + logger.debug("policy [{}] is unchanged and no phase definition refresh is needed", oldPolicy.getName()); + return state; + } + + final List indicesThatCanBeUpdated = + StreamSupport.stream(Spliterators.spliteratorUnknownSize(state.metaData().indices().valuesIt(), 0), false) + .filter(meta -> newPolicy.getName().equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(meta.getSettings()))) + .filter(meta -> isIndexPhaseDefinitionUpdatable(xContentRegistry, client, meta, newPolicy.getPolicy())) + .map(meta -> meta.getIndex().getName()) + .collect(Collectors.toList()); + + ClusterState updatedState = state; + for (String index : indicesThatCanBeUpdated) { + try { + updatedState = refreshPhaseDefinition(updatedState, index, newPolicy); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("[{}] unable to refresh phase definition for updated policy [{}]", + index, newPolicy.getName()), e); + } + } + + return updatedState; + } + @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 25de192e76d04..689141b2cf235 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -446,7 +446,7 @@ public static boolean okayToDeleteSnapshots(ClusterState state) { // Cannot delete during a restore final RestoreInProgress restoreInProgress = state.custom(RestoreInProgress.TYPE); - if (restoreInProgress != null) { + if (restoreInProgress != null && restoreInProgress.isEmpty() == 
false) { return false; } @@ -480,6 +480,7 @@ public void onNewClusterState(ClusterState state) { logger.debug("retrying SLM snapshot retention deletion after snapshot operation has completed"); reRun.accept(state); } else { + logger.trace("received new cluster state but a snapshot operation is still running"); observer.waitForNextChange(this); } } catch (Exception e) { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java new file mode 100644 index 0000000000000..f2f67fa281f90 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java @@ -0,0 +1,499 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ilm.action; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ilm.AllocateAction; +import org.elasticsearch.xpack.core.ilm.AllocationRoutedStep; +import org.elasticsearch.xpack.core.ilm.ErrorStep; +import org.elasticsearch.xpack.core.ilm.ForceMergeAction; +import org.elasticsearch.xpack.core.ilm.FreezeAction; +import org.elasticsearch.xpack.core.ilm.LifecycleAction; +import org.elasticsearch.xpack.core.ilm.LifecycleExecutionState; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.ilm.Phase; +import org.elasticsearch.xpack.core.ilm.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; +import org.elasticsearch.xpack.core.ilm.RolloverAction; +import org.elasticsearch.xpack.core.ilm.RolloverStep; +import org.elasticsearch.xpack.core.ilm.SegmentCountStep; +import org.elasticsearch.xpack.core.ilm.SetPriorityAction; +import org.elasticsearch.xpack.core.ilm.Step; +import org.elasticsearch.xpack.core.ilm.UpdateRolloverLifecycleDateStep; +import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep; +import org.elasticsearch.xpack.ilm.IndexLifecycle; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class TransportPutLifecycleActionTests extends ESTestCase { + private static final NamedXContentRegistry REGISTRY; + private static final Client client = mock(Client.class); + private static final String index = "eggplant"; + + static { + try (IndexLifecycle indexLifecycle = new IndexLifecycle(Settings.EMPTY)) { + List entries = new 
ArrayList<>(indexLifecycle.getNamedXContent()); + REGISTRY = new NamedXContentRegistry(entries); + } + } + + public void testEligibleForRefresh() { + IndexMetaData meta = mkMeta().build(); + assertFalse(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + LifecycleExecutionState state = LifecycleExecutionState.builder().build(); + meta = mkMeta().putCustom(ILM_CUSTOM_METADATA_KEY, state.asMap()).build(); + assertFalse(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + state = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("step") + .build(); + meta = mkMeta().putCustom(ILM_CUSTOM_METADATA_KEY, state.asMap()).build(); + assertFalse(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + state = LifecycleExecutionState.builder() + .setPhaseDefinition("{}") + .build(); + meta = mkMeta().putCustom(ILM_CUSTOM_METADATA_KEY, state.asMap()).build(); + assertFalse(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + state = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep(ErrorStep.NAME) + .setPhaseDefinition("{}") + .build(); + meta = mkMeta().putCustom(ILM_CUSTOM_METADATA_KEY, state.asMap()).build(); + assertFalse(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + state = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("step") + .setPhaseDefinition("{}") + .build(); + meta = mkMeta().putCustom(ILM_CUSTOM_METADATA_KEY, state.asMap()).build(); + assertTrue(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + } + + public void testReadStepKeys() { + assertNull(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, "{}", "phase")); + assertNull(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, "aoeu", "phase")); + assertNull(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, "", "phase")); + + assertThat(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, "{\n" + + " \"policy\": \"my_lifecycle3\",\n" + + " \"phase_definition\": { \n" + + " \"min_age\": \"0ms\",\n" + + " \"actions\": {\n" + + " \"rollover\": {\n" + + " \"max_age\": \"30s\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\": 3, \n" + + " \"modified_date_in_millis\": 1539609701576 \n" + + " }", "phase"), + contains(new Step.StepKey("phase", "rollover", WaitForRolloverReadyStep.NAME), + new Step.StepKey("phase", "rollover", RolloverStep.NAME), + new Step.StepKey("phase", "rollover", UpdateRolloverLifecycleDateStep.NAME), + new Step.StepKey("phase", "rollover", RolloverAction.INDEXING_COMPLETE_STEP_NAME))); + + assertThat(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, "{\n" + + " \"policy\" : \"my_lifecycle3\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }", "phase"), + contains(new Step.StepKey("phase", "rollover", WaitForRolloverReadyStep.NAME), + new Step.StepKey("phase", "rollover", RolloverStep.NAME), + new Step.StepKey("phase", "rollover", UpdateRolloverLifecycleDateStep.NAME), + new Step.StepKey("phase", "rollover", RolloverAction.INDEXING_COMPLETE_STEP_NAME), + new Step.StepKey("phase", "set_priority", SetPriorityAction.NAME))); + + Map actions = new HashMap<>(); + actions.put("forcemerge", new 
ForceMergeAction(5)); + actions.put("freeze", new FreezeAction()); + actions.put("allocate", new AllocateAction(1, null, null, null)); + PhaseExecutionInfo pei = new PhaseExecutionInfo("policy", new Phase("wonky", TimeValue.ZERO, actions), 1, 1); + String phaseDef = Strings.toString(pei); + logger.info("--> phaseDef: {}", phaseDef); + + assertThat(TransportPutLifecycleAction.readStepKeys(REGISTRY, client, phaseDef, "phase"), + contains(new Step.StepKey("phase", "freeze", FreezeAction.NAME), + new Step.StepKey("phase", "allocate", AllocateAction.NAME), + new Step.StepKey("phase", "allocate", AllocationRoutedStep.NAME), + new Step.StepKey("phase", "forcemerge", ReadOnlyAction.NAME), + new Step.StepKey("phase", "forcemerge", ForceMergeAction.NAME), + new Step.StepKey("phase", "forcemerge", SegmentCountStep.NAME))); + } + + public void testIndexCanBeSafelyUpdated() { + + // Success case, it can be updated even though the configuration for the + // rollover and set_priority actions has changed + { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + .setPhaseDefinition("{\n" + + " \"policy\" : \"my-policy\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 1L)); + actions.put("set_priority", new SetPriorityAction(100)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + + assertTrue(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + } + + // Failure case, can't update because the step we're currently on has been removed in the new policy + { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + .setPhaseDefinition("{\n" + + " \"policy\" : \"my-policy\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("set_priority", new SetPriorityAction(150)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + + assertFalse(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + } + + // Failure case, can't update because the future step has been deleted + { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + 
.setPhaseDefinition("{\n" + + " \"policy\" : \"my-policy\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, TimeValue.timeValueSeconds(5), null)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + + assertFalse(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + } + + // Failure case, index doesn't have enough info to check + { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhaseDefinition("{\n" + + " \"policy\" : \"my-policy\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 1L)); + actions.put("set_priority", new SetPriorityAction(100)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + + assertFalse(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + } + + // Failure case, the phase JSON is unparseable + { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + .setPhaseDefinition("potato") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 1L)); + actions.put("set_priority", new SetPriorityAction(100)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + + assertFalse(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + } + } + + public void testRefreshPhaseJson() { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + .setPhaseDefinition("{\n" + + " \"policy\" : \"my-policy\",\n" + + " \"phase_definition\" : {\n" + + " \"min_age\" : \"20m\",\n" + + " \"actions\" : {\n" + + " \"rollover\" : {\n" + + " \"max_age\" : \"5s\"\n" + + " },\n" + + " \"set_priority\" : {\n" + + " \"priority\" : 150\n" + + " }\n" + + " }\n" + + " },\n" + + " \"version\" : 1,\n" + + " \"modified_date_in_millis\" : 1578521007076\n" + + " }") + .build(); + + IndexMetaData meta = mkMeta() + 
.putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 1L)); + actions.put("set_priority", new SetPriorityAction(100)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + + ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metaData(MetaData.builder(MetaData.EMPTY_META_DATA) + .put(meta, false) + .build()) + .build(); + + ClusterState changedState = TransportPutLifecycleAction.refreshPhaseDefinition(existingState, index, policyMetadata); + + IndexMetaData newIdxMeta = changedState.metaData().index(index); + LifecycleExecutionState afterExState = LifecycleExecutionState.fromIndexMetadata(newIdxMeta); + Map beforeState = new HashMap<>(exState.asMap()); + beforeState.remove("phase_definition"); + Map afterState = new HashMap<>(afterExState.asMap()); + afterState.remove("phase_definition"); + // Check that no other execution state changes have been made + assertThat(beforeState, equalTo(afterState)); + + // Check that the phase definition has been refreshed + assertThat(afterExState.getPhaseDefinition(), + equalTo("{\"policy\":\"my-policy\",\"phase_definition\":{\"min_age\":\"0ms\",\"actions\":{\"rollover\":{\"max_docs\":1}," + + "\"set_priority\":{\"priority\":100}}},\"version\":2,\"modified_date_in_millis\":2}")); + } + + public void testUpdateIndicesForPolicy() { + LifecycleExecutionState exState = LifecycleExecutionState.builder() + .setPhase("hot") + .setAction("rollover") + .setStep("check-rollover-ready") + .setPhaseDefinition("{\"policy\":\"my-policy\",\"phase_definition\":{\"min_age\":\"0ms\",\"actions\":{\"rollover\":" + + "{\"max_docs\":1},\"set_priority\":{\"priority\":100}}},\"version\":1,\"modified_date_in_millis\":1578521007076}") + .build(); + + IndexMetaData meta = mkMeta() + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + + assertTrue(TransportPutLifecycleAction.eligibleToCheckForRefresh(meta)); + + Map oldActions = new HashMap<>(); + oldActions.put("rollover", new RolloverAction(null, null, 1L)); + oldActions.put("set_priority", new SetPriorityAction(100)); + Phase oldHotPhase = new Phase("hot", TimeValue.ZERO, oldActions); + Map oldPhases = Collections.singletonMap("hot", oldHotPhase); + LifecyclePolicy oldPolicy = new LifecyclePolicy("my-policy", oldPhases); + + Map actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 1L)); + actions.put("set_priority", new SetPriorityAction(100)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + Map phases = Collections.singletonMap("hot", hotPhase); + LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + + assertTrue(TransportPutLifecycleAction.isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy)); + + ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metaData(MetaData.builder(MetaData.EMPTY_META_DATA) + .put(meta, false) + .build()) + .build(); + + logger.info("--> update for unchanged policy"); + ClusterState updatedState = TransportPutLifecycleAction.updateIndicesForPolicy(existingState, REGISTRY, + client, oldPolicy, 
policyMetadata); + + // No change, because the policies were identical + assertThat(updatedState, equalTo(existingState)); + + actions = new HashMap<>(); + actions.put("rollover", new RolloverAction(null, null, 2L)); + actions.put("set_priority", new SetPriorityAction(150)); + hotPhase = new Phase("hot", TimeValue.ZERO, actions); + phases = Collections.singletonMap("hot", hotPhase); + newPolicy = new LifecyclePolicy("my-policy", phases); + policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + + logger.info("--> update with changed policy, but not configured in settings"); + updatedState = TransportPutLifecycleAction.updateIndicesForPolicy(existingState, REGISTRY, client, oldPolicy, policyMetadata); + + // No change, because the index doesn't have a lifecycle.name setting for this policy + assertThat(updatedState, equalTo(existingState)); + + meta = IndexMetaData.builder(index) + .settings(Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, "my-policy") + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, randomAlphaOfLength(5))) + .putCustom(ILM_CUSTOM_METADATA_KEY, exState.asMap()) + .build(); + existingState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metaData(MetaData.builder(MetaData.EMPTY_META_DATA) + .put(meta, false) + .build()) + .build(); + + logger.info("--> update with changed policy and this index has the policy"); + updatedState = TransportPutLifecycleAction.updateIndicesForPolicy(existingState, REGISTRY, client, oldPolicy, policyMetadata); + + IndexMetaData newIdxMeta = updatedState.metaData().index(index); + LifecycleExecutionState afterExState = LifecycleExecutionState.fromIndexMetadata(newIdxMeta); + Map beforeState = new HashMap<>(exState.asMap()); + beforeState.remove("phase_definition"); + Map afterState = new HashMap<>(afterExState.asMap()); + afterState.remove("phase_definition"); + // Check that no other execution state changes have been made + assertThat(beforeState, equalTo(afterState)); + + // Check that the phase definition has been refreshed + assertThat(afterExState.getPhaseDefinition(), + equalTo("{\"policy\":\"my-policy\",\"phase_definition\":{\"min_age\":\"0ms\",\"actions\":{\"rollover\":{\"max_docs\":2}," + + "\"set_priority\":{\"priority\":150}}},\"version\":2,\"modified_date_in_millis\":2}")); + } + + private static IndexMetaData.Builder mkMeta() { + return IndexMetaData.builder(index) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, randomAlphaOfLength(5))); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index 16f388fa49481..0b3c2d2dbd58f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -8,6 +8,9 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import 
org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -22,6 +25,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotMissingException; @@ -30,6 +34,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotInvocationRecord; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyItem; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; @@ -385,6 +390,74 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex } } + public void testSLMRetentionAfterRestore() throws Exception { + final String indexName = "test"; + final String policyName = "test-policy"; + int docCount = 20; + for (int i = 0; i < docCount; i++) { + index(indexName, i + "", Collections.singletonMap("foo", "bar")); + } + + // Create a snapshot repo + initializeRepo(REPO); + + logger.info("--> creating policy {}", policyName); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, REPO, indexName, true, false, + new SnapshotRetentionConfiguration(TimeValue.ZERO, null, null)); + + logger.info("--> executing snapshot lifecycle"); + final String snapshotName = executePolicy(policyName); + + // Check that the executed snapshot shows up in the SLM output + assertBusy(() -> { + GetSnapshotLifecycleAction.Response getResp = + client().execute(GetSnapshotLifecycleAction.INSTANCE, new GetSnapshotLifecycleAction.Request(policyName)).get(); + logger.info("--> checking for in progress snapshot..."); + + assertThat(getResp.getPolicies().size(), greaterThan(0)); + SnapshotLifecyclePolicyItem item = getResp.getPolicies().get(0); + SnapshotInvocationRecord lastSuccess = item.getLastSuccess(); + assertNotNull(lastSuccess); + assertThat(lastSuccess.getSnapshotName(), equalTo(snapshotName)); + }); + + logger.info("--> restoring index"); + RestoreSnapshotRequest restoreReq = new RestoreSnapshotRequest(REPO, snapshotName); + restoreReq.indices(indexName); + restoreReq.renamePattern("(.+)"); + restoreReq.renameReplacement("restored_$1"); + restoreReq.waitForCompletion(true); + RestoreSnapshotResponse resp = client().execute(RestoreSnapshotAction.INSTANCE, restoreReq).get(); + assertThat(resp.status(), equalTo(RestStatus.OK)); + + logger.info("--> executing SLM retention"); + assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get()); + logger.info("--> waiting for {} snapshot to be deleted", snapshotName); + assertBusy(() -> { + try { + try { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + 
.prepareGetSnapshots(REPO).setSnapshots(snapshotName).get(); + assertThat(snapshotsStatusResponse.getSnapshots(REPO), empty()); + } catch (SnapshotMissingException e) { + // This is what we want to happen + } + logger.info("--> snapshot [{}] has been deleted", snapshotName); + } catch (RepositoryException re) { + // Concurrent status calls and write operations may lead to failures in determining the current repository generation + // TODO: Remove this hack once tracking the current repository generation has been made consistent + throw new AssertionError(re); + } + }); + + // Cancel/delete the snapshot + try { + client().admin().cluster().prepareDeleteSnapshot(REPO, snapshotName).get(); + } catch (SnapshotMissingException e) { + // ignore + } + } + private SnapshotsStatusResponse getSnapshotStatus(String snapshotName) { try { return client().admin().cluster().prepareSnapshotStatus(REPO).setSnapshots(snapshotName).get(); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java index ede01f35b04b0..91887f3146aec 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java @@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SnapshotRetentionTaskTests extends ESTestCase { @@ -363,6 +364,14 @@ public void testOkToDeleteSnapshots() { .build(); assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(false)); + + restoreInProgress = mock(RestoreInProgress.class); + when(restoreInProgress.isEmpty()).thenReturn(true); + state = ClusterState.builder(new ClusterName("cluster")) + .putCustom(RestoreInProgress.TYPE, restoreInProgress) + .build(); + + assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(true)); } public void testSkipWhileStopping() throws Exception { @@ -420,10 +429,10 @@ private void doTestRunManuallyDuringMode(OperationMode mode) throws Exception { final String repoId = "repo"; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", "1 * * * * ?", repoId, null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null)); - + ClusterState state = createState(mode, policy); ClusterServiceUtils.setState(clusterService, state); - + AtomicBoolean retentionWasRun = new AtomicBoolean(false); MockSnapshotRetentionTask task = new MockSnapshotRetentionTask(noOpClient, clusterService, new SnapshotLifecycleTaskTests.VerifyingHistoryStore(noOpClient, ZoneOffset.UTC, (historyItem) -> { @@ -436,10 +445,10 @@ private void doTestRunManuallyDuringMode(OperationMode mode) throws Exception { (deletionPolicyId, repo, snapId, slmStats, listener) -> { }, System::nanoTime); - + long time = System.currentTimeMillis(); task.triggered(new SchedulerEngine.Event(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID, time, time)); - + assertTrue("retention should be run manually even if SLM is disabled", retentionWasRun.get()); } finally { threadPool.shutdownNow(); diff --git a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectFieldMapperTests.java b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectFieldMapperTests.java index 
aeb22c331b355..44dfc11898016 100644 --- a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectFieldMapperTests.java +++ b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectFieldMapperTests.java @@ -56,7 +56,7 @@ protected Collection> getPlugins() { public void testDefaults() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -65,7 +65,7 @@ public void testDefaults() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -111,7 +111,7 @@ public void testDefaults() throws Exception { public void testDisableIndex() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -121,7 +121,7 @@ public void testDisableIndex() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -143,7 +143,7 @@ public void testDisableIndex() throws Exception { public void testDisableDocValues() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -153,7 +153,7 @@ public void testDisableDocValues() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -195,7 +195,7 @@ public void testEnableStore() throws Exception { public void testIndexOptions() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -205,12 +205,12 @@ public void testIndexOptions() throws IOException { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); for (String indexOptions : Arrays.asList("positions", "offsets")) { String invalidMapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -220,14 +220,14 @@ public void testIndexOptions() throws IOException { .endObject() .endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> 
parser.parse("type", new CompressedXContent(invalidMapping))); + () -> parser.parse("_doc", new CompressedXContent(invalidMapping))); assertEquals("The [flattened] field does not support positions, got [index_options]=" + indexOptions, e.getMessage()); } } public void testNullField() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -236,7 +236,7 @@ public void testNullField() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -250,7 +250,7 @@ public void testNullField() throws Exception { public void testMalformedJson() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -259,7 +259,7 @@ public void testMalformedJson() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc1 = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -276,7 +276,7 @@ public void testMalformedJson() throws Exception { public void testFieldMultiplicity() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -285,7 +285,7 @@ public void testFieldMultiplicity() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -318,7 +318,7 @@ public void testFieldMultiplicity() throws Exception { public void testDepthLimit() throws IOException { // First verify the default behavior when depth_limit is not set. String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -327,7 +327,7 @@ public void testDepthLimit() throws IOException { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -344,7 +344,7 @@ public void testDepthLimit() throws IOException { // Set a lower value for depth_limit and check that the field is rejected. 
String newMapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -355,7 +355,7 @@ public void testDepthLimit() throws IOException { .endObject()); DocumentMapper newMapper = mapper.merge( - parser.parse("type", new CompressedXContent(newMapping)).mapping()); + parser.parse("_doc", new CompressedXContent(newMapping)).mapping()); expectThrows(MapperParsingException.class, () -> newMapper.parse(new SourceToParse("test", "1", doc, XContentType.JSON))); @@ -363,7 +363,7 @@ public void testDepthLimit() throws IOException { public void testEagerGlobalOrdinals() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -371,7 +371,7 @@ public void testEagerGlobalOrdinals() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); @@ -381,7 +381,7 @@ public void testEagerGlobalOrdinals() throws IOException { public void testIgnoreAbove() throws IOException { // First verify the default behavior when ignore_above is not set. String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -390,7 +390,7 @@ public void testIgnoreAbove() throws IOException { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -427,7 +427,7 @@ public void testIgnoreAbove() throws IOException { public void testNullValues() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type") + .startObject("_doc") .startObject("properties") .startObject("field") .field("type", "flattened") @@ -440,7 +440,7 @@ public void testNullValues() throws Exception { .endObject() .endObject()); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference doc = BytesReference.bytes(XContentFactory.jsonBuilder().startObject() diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index 8c9ba6df7f009..7e19a4d606d25 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -133,7 +133,6 @@ integTest.runner { 'ml/get_datafeed_stats/Test get datafeed stats given missing datafeed_id', 'ml/get_datafeeds/Test get datafeed given missing datafeed_id', 'ml/inference_crud/Test delete given used trained model', - 'ml/inference_crud/Test delete given unused trained model', 'ml/inference_crud/Test delete with missing model', 'ml/inference_crud/Test get given missing trained model', 
'ml/inference_crud/Test get given expression without matches and allow_no_match is false', diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java index 32d8ad0b79845..d1c8c6c4c75ef 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.integration; import com.google.common.collect.Ordering; - import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; @@ -28,7 +27,6 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParams; -import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParamsTests; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Classification; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.Accuracy; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.classification.MulticlassConfusionMatrix; @@ -63,6 +61,9 @@ public class ClassificationIT extends MlNativeDataFrameAnalyticsIntegTestCase { private static final String NUMERICAL_FIELD = "numerical-field"; private static final String DISCRETE_NUMERICAL_FIELD = "discrete-numerical-field"; private static final String KEYWORD_FIELD = "keyword-field"; + private static final String NESTED_FIELD = "outer-field.inner-field"; + private static final String ALIAS_TO_KEYWORD_FIELD = "alias-to-keyword-field"; + private static final String ALIAS_TO_NESTED_FIELD = "alias-to-nested-field"; private static final List BOOLEAN_FIELD_VALUES = Collections.unmodifiableList(Arrays.asList(false, true)); private static final List NUMERICAL_FIELD_VALUES = Collections.unmodifiableList(Arrays.asList(1.0, 2.0)); private static final List DISCRETE_NUMERICAL_FIELD_VALUES = Collections.unmodifiableList(Arrays.asList(10, 20)); @@ -83,7 +84,14 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws String predictedClassField = KEYWORD_FIELD + "_prediction"; indexData(sourceIndex, 300, 50, KEYWORD_FIELD); - DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(KEYWORD_FIELD)); + DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, + new Classification( + KEYWORD_FIELD, + BoostedTreeParams.builder().setNumTopFeatureImportanceValues(1).build(), + null, + null, + null, + null)); registerAnalytics(config); putAnalytics(config); @@ -101,6 +109,7 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws assertThat(getFieldValue(resultsObject, predictedClassField), is(in(KEYWORD_FIELD_VALUES))); assertThat(getFieldValue(resultsObject, "is_training"), is(destDoc.containsKey(KEYWORD_FIELD))); assertTopClasses(resultsObject, 2, KEYWORD_FIELD, KEYWORD_FIELD_VALUES); + assertThat(resultsObject.keySet().stream().filter(k -> k.startsWith("feature_importance.")).findAny().isPresent(), is(true)); } assertProgress(jobId, 100, 100, 100, 100); @@ -175,7 +184,7 
@@ public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty(String jobId, sourceIndex, destIndex, null, - new Classification(dependentVariable, BoostedTreeParamsTests.createRandom(), null, numTopClasses, 50.0, null)); + new Classification(dependentVariable, BoostedTreeParams.builder().build(), null, numTopClasses, 50.0, null)); registerAnalytics(config); putAnalytics(config); @@ -301,7 +310,6 @@ public void testStopAndRestart() throws Exception { assertInferenceModelPersisted(jobId); assertMlResultsFieldMappings(predictedClassField, "keyword"); assertEvaluation(KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField); - } public void testDependentVariableCardinalityTooHighError() throws Exception { @@ -343,6 +351,63 @@ public void testDependentVariableCardinalityTooHighButWithQueryMakesItWithinRang assertProgress(jobId, 100, 100, 100, 100); } + public void testDependentVariableIsNested() throws Exception { + initialize("dependent_variable_is_nested"); + String predictedClassField = NESTED_FIELD + "_prediction"; + indexData(sourceIndex, 100, 0, NESTED_FIELD); + + DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(NESTED_FIELD)); + registerAnalytics(config); + putAnalytics(config); + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertModelStatePersisted(stateDocId()); + assertInferenceModelPersisted(jobId); + assertMlResultsFieldMappings(predictedClassField, "keyword"); + assertEvaluation(NESTED_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField); + } + + public void testDependentVariableIsAliasToKeyword() throws Exception { + initialize("dependent_variable_is_alias"); + String predictedClassField = ALIAS_TO_KEYWORD_FIELD + "_prediction"; + indexData(sourceIndex, 100, 0, KEYWORD_FIELD); + + DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(ALIAS_TO_KEYWORD_FIELD)); + registerAnalytics(config); + putAnalytics(config); + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertModelStatePersisted(stateDocId()); + assertInferenceModelPersisted(jobId); + assertMlResultsFieldMappings(predictedClassField, "keyword"); + assertEvaluation(ALIAS_TO_KEYWORD_FIELD, KEYWORD_FIELD_VALUES, "ml." + predictedClassField); + } + + public void testDependentVariableIsAliasToNested() throws Exception { + initialize("dependent_variable_is_alias_to_nested"); + String predictedClassField = ALIAS_TO_NESTED_FIELD + "_prediction"; + indexData(sourceIndex, 100, 0, NESTED_FIELD); + + DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Classification(ALIAS_TO_NESTED_FIELD)); + registerAnalytics(config); + putAnalytics(config); + startAnalytics(jobId); + waitUntilAnalyticsIsStopped(jobId); + + assertProgress(jobId, 100, 100, 100, 100); + assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertModelStatePersisted(stateDocId()); + assertInferenceModelPersisted(jobId); + assertMlResultsFieldMappings(predictedClassField, "keyword"); + assertEvaluation(ALIAS_TO_NESTED_FIELD, KEYWORD_FIELD_VALUES, "ml." 
+ predictedClassField); + } + public void testTwoJobsWithSameRandomizeSeedUseSameTrainingSet() throws Exception { String sourceIndex = "classification_two_jobs_with_same_randomize_seed_source"; String dependentVariable = KEYWORD_FIELD; @@ -355,7 +420,13 @@ public void testTwoJobsWithSameRandomizeSeedUseSameTrainingSet() throws Exceptio String firstJobId = "classification_two_jobs_with_same_randomize_seed_1"; String firstJobDestIndex = firstJobId + "_dest"; - BoostedTreeParams boostedTreeParams = new BoostedTreeParams(1.0, 1.0, 1.0, 1, 1.0); + BoostedTreeParams boostedTreeParams = BoostedTreeParams.builder() + .setLambda(1.0) + .setGamma(1.0) + .setEta(1.0) + .setFeatureBagFraction(1.0) + .setMaximumNumberTrees(1) + .build(); DataFrameAnalyticsConfig firstJob = buildAnalytics(firstJobId, sourceIndex, firstJobDestIndex, null, new Classification(dependentVariable, boostedTreeParams, null, 1, 50.0, null)); @@ -434,7 +505,10 @@ private static void createIndex(String index) { BOOLEAN_FIELD, "type=boolean", NUMERICAL_FIELD, "type=double", DISCRETE_NUMERICAL_FIELD, "type=integer", - KEYWORD_FIELD, "type=keyword") + KEYWORD_FIELD, "type=keyword", + NESTED_FIELD, "type=keyword", + ALIAS_TO_KEYWORD_FIELD, "type=alias,path=" + KEYWORD_FIELD, + ALIAS_TO_NESTED_FIELD, "type=alias,path=" + NESTED_FIELD) .get(); } @@ -446,7 +520,8 @@ private static void indexData(String sourceIndex, int numTrainingRows, int numNo BOOLEAN_FIELD, BOOLEAN_FIELD_VALUES.get(i % BOOLEAN_FIELD_VALUES.size()), NUMERICAL_FIELD, NUMERICAL_FIELD_VALUES.get(i % NUMERICAL_FIELD_VALUES.size()), DISCRETE_NUMERICAL_FIELD, DISCRETE_NUMERICAL_FIELD_VALUES.get(i % DISCRETE_NUMERICAL_FIELD_VALUES.size()), - KEYWORD_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())); + KEYWORD_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size()), + NESTED_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size())); IndexRequest indexRequest = new IndexRequest(sourceIndex).source(source.toArray()); bulkRequestBuilder.add(indexRequest); } @@ -465,6 +540,9 @@ private static void indexData(String sourceIndex, int numTrainingRows, int numNo if (KEYWORD_FIELD.equals(dependentVariable) == false) { source.addAll(List.of(KEYWORD_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size()))); } + if (NESTED_FIELD.equals(dependentVariable) == false) { + source.addAll(List.of(NESTED_FIELD, KEYWORD_FIELD_VALUES.get(i % KEYWORD_FIELD_VALUES.size()))); + } IndexRequest indexRequest = new IndexRequest(sourceIndex).source(source.toArray()); bulkRequestBuilder.add(indexRequest); } @@ -487,10 +565,12 @@ private static Map getDestDoc(DataFrameAnalyticsConfig config, S } /** - * Wrapper around extractValue with implicit casting to the appropriate type. + * Wrapper around extractValue that: + * - allows dots (".") in the path elements provided as arguments + * - supports implicit casting to the appropriate type */ private static T getFieldValue(Map doc, String... 
path) { - return (T)extractValue(doc, path); + return (T)extractValue(String.join(".", path), doc); } private static void assertTopClasses(Map resultsObject, @@ -501,9 +581,11 @@ private static void assertTopClasses(Map resultsObject, assertThat(topClasses, hasSize(numTopClasses)); List classNames = new ArrayList<>(topClasses.size()); List classProbabilities = new ArrayList<>(topClasses.size()); + List classScores = new ArrayList<>(topClasses.size()); for (Map topClass : topClasses) { classNames.add(getFieldValue(topClass, "class_name")); classProbabilities.add(getFieldValue(topClass, "class_probability")); + classScores.add(getFieldValue(topClass, "class_score")); } // Assert that all the predicted class names come from the set of dependent variable values. classNames.forEach(className -> assertThat(className, is(in(dependentVariableValues)))); @@ -511,8 +593,8 @@ private static void assertTopClasses(Map resultsObject, assertThat(classNames.get(0), equalTo(resultsObject.get(dependentVariable + "_prediction"))); // Assert that all the class probabilities lie within [0, 1] interval. classProbabilities.forEach(p -> assertThat(p, allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(1.0)))); - // Assert that the top classes are listed in the order of decreasing probabilities. - assertThat(Ordering.natural().reverse().isOrdered(classProbabilities), is(true)); + // Assert that the top classes are listed in the order of decreasing scores. + assertThat(Ordering.natural().reverse().isOrdered(classScores), is(true)); } private void assertEvaluation(String dependentVariable, List dependentVariableValues, String predictedClassField) { @@ -582,8 +664,14 @@ private void assertMlResultsFieldMappings(String predictedClassField, String exp .mappings() .get(destIndex) .sourceAsMap(); - assertThat(getFieldValue(mappings, "properties", "ml", "properties", predictedClassField, "type"), equalTo(expectedType)); assertThat( + mappings.toString(), + getFieldValue( + mappings, + "properties", "ml", "properties", String.join(".properties.", predictedClassField.split("\\.")), "type"), + equalTo(expectedType)); + assertThat( + mappings.toString(), getFieldValue(mappings, "properties", "ml", "properties", "top_classes", "properties", "class_name", "type"), equalTo(expectedType)); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java index 27e83e04b412b..9320d393a4f66 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InferenceIngestIT.java @@ -9,15 +9,19 @@ import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; -import org.elasticsearch.xpack.core.ml.inference.InferenceToXContentCompressor; -import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; +import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAction; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.junit.After; import org.junit.Before; import java.io.IOException; @@ -34,26 +38,14 @@ public class InferenceIngestIT extends MlNativeAutodetectIntegTestCase { @Before public void createBothModels() throws Exception { - assertThat(client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME) - .setId("test_classification") - .setSource(CLASSIFICATION_CONFIG, XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get().status(), equalTo(RestStatus.CREATED)); - assertThat(client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME) - .setId(TrainedModelDefinitionDoc.docId("test_classification", 0)) - .setSource(buildClassificationModelDoc(), XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get().status(), equalTo(RestStatus.CREATED)); - assertThat(client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME) - .setId("test_regression") - .setSource(REGRESSION_CONFIG, XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get().status(), equalTo(RestStatus.CREATED)); - assertThat(client().prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME) - .setId(TrainedModelDefinitionDoc.docId("test_regression", 0)) - .setSource(buildRegressionModelDoc(), XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get().status(), equalTo(RestStatus.CREATED)); + client().execute(PutTrainedModelAction.INSTANCE, new PutTrainedModelAction.Request(buildClassificationModel())).actionGet(); + client().execute(PutTrainedModelAction.INSTANCE, new PutTrainedModelAction.Request(buildRegressionModel())).actionGet(); + } + + @After + public void deleteBothModels() { + client().execute(DeleteTrainedModelAction.INSTANCE, new DeleteTrainedModelAction.Request("test_classification")).actionGet(); + client().execute(DeleteTrainedModelAction.INSTANCE, new DeleteTrainedModelAction.Request("test_regression")).actionGet(); } public void testPipelineCreationAndDeletion() throws Exception { @@ -391,6 +383,7 @@ private Map generateSourceDoc() { " \"input\":{\"field_names\":[\"col1\",\"col2\",\"col3\",\"col4\"]}," + " \"description\": \"test model for regression\",\n" + " \"version\": \"8.0.0\",\n" + + " \"definition\": " + REGRESSION_DEFINITION + ","+ " \"license_level\": \"platinum\",\n" + " \"created_by\": \"ml_test\",\n" + " \"estimated_heap_memory_usage_bytes\": 0," + @@ -477,7 +470,7 @@ private Map generateSourceDoc() { " },\n" + " {\n" + " \"node_index\": 2,\n" + - " \"leaf_value\": 2\n" + + " \"leaf_value\": 0\n" + " }\n" + " ],\n" + " \"target_type\": \"regression\"\n" + @@ -507,7 +500,7 @@ private Map generateSourceDoc() { " },\n" + " {\n" + " \"node_index\": 2,\n" + - " \"leaf_value\": 2\n" + + " \"leaf_value\": 0\n" + " }\n" + " ],\n" + " \"target_type\": \"regression\"\n" + @@ -518,28 +511,27 @@ private Map generateSourceDoc() { " }\n" + "}"; - private static String buildClassificationModelDoc() throws 
IOException { - String compressed = - InferenceToXContentCompressor.deflate(new BytesArray(CLASSIFICATION_DEFINITION.getBytes(StandardCharsets.UTF_8))); - return modelDocString(compressed, "test_classification"); + private TrainedModelConfig buildClassificationModel() throws IOException { + try (XContentParser parser = XContentHelper.createParser(xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + new BytesArray(CLASSIFICATION_CONFIG), + XContentType.JSON)) { + return TrainedModelConfig.LENIENT_PARSER.apply(parser, null).build(); + } } - private static String buildRegressionModelDoc() throws IOException { - String compressed = InferenceToXContentCompressor.deflate(new BytesArray(REGRESSION_DEFINITION.getBytes(StandardCharsets.UTF_8))); - return modelDocString(compressed, "test_regression"); + private TrainedModelConfig buildRegressionModel() throws IOException { + try (XContentParser parser = XContentHelper.createParser(xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + new BytesArray(REGRESSION_CONFIG), + XContentType.JSON)) { + return TrainedModelConfig.LENIENT_PARSER.apply(parser, null).build(); + } } - private static String modelDocString(String compressedDefinition, String modelId) { - return "" + - "{" + - "\"model_id\": \"" + modelId + "\",\n" + - "\"doc_num\": 0,\n" + - "\"doc_type\": \"trained_model_definition_doc\",\n" + - " \"compression_version\": " + 1 + ",\n" + - " \"total_definition_length\": " + compressedDefinition.length() + ",\n" + - " \"definition_length\": " + compressedDefinition.length() + ",\n" + - "\"definition\": \"" + compressedDefinition + "\"\n" + - "}"; + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); } private static final String CLASSIFICATION_CONFIG = "" + @@ -547,9 +539,10 @@ private static String modelDocString(String compressedDefinition, String modelId " \"model_id\": \"test_classification\",\n" + " \"input\":{\"field_names\":[\"col1\",\"col2\",\"col3\",\"col4\"]}," + " \"description\": \"test model for classification\",\n" + + " \"definition\": " + CLASSIFICATION_DEFINITION + ","+ " \"version\": \"8.0.0\",\n" + " \"license_level\": \"platinum\",\n" + - " \"created_by\": \"benwtrent\",\n" + + " \"created_by\": \"es_test\",\n" + " \"estimated_heap_memory_usage_bytes\": 0," + " \"estimated_operations\": 0," + " \"created_time\": 0\n" + diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java index 2d790260dac12..3315727df57c6 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParams; -import org.elasticsearch.xpack.core.ml.dataframe.analyses.BoostedTreeParamsTests; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; import org.junit.After; @@ -53,7 +52,14 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws 
initialize("regression_single_numeric_feature_and_mixed_data_set"); indexData(sourceIndex, 300, 50); - DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, new Regression(DEPENDENT_VARIABLE_FIELD)); + DataFrameAnalyticsConfig config = buildAnalytics(jobId, sourceIndex, destIndex, null, + new Regression( + DEPENDENT_VARIABLE_FIELD, + BoostedTreeParams.builder().setNumTopFeatureImportanceValues(1).build(), + null, + null, + null) + ); registerAnalytics(config); putAnalytics(config); @@ -78,6 +84,7 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws assertThat(resultsObject.containsKey("variable_prediction"), is(true)); assertThat(resultsObject.containsKey("is_training"), is(true)); assertThat(resultsObject.get("is_training"), is(destDoc.containsKey(DEPENDENT_VARIABLE_FIELD))); + assertThat(resultsObject.containsKey("feature_importance." + NUMERICAL_FEATURE_FIELD), is(true)); } assertProgress(jobId, 100, 100, 100, 100); @@ -141,7 +148,7 @@ public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty() throws Exception sourceIndex, destIndex, null, - new Regression(DEPENDENT_VARIABLE_FIELD, BoostedTreeParamsTests.createRandom(), null, 50.0, null)); + new Regression(DEPENDENT_VARIABLE_FIELD, BoostedTreeParams.builder().build(), null, 50.0, null)); registerAnalytics(config); putAnalytics(config); @@ -244,7 +251,13 @@ public void testTwoJobsWithSameRandomizeSeedUseSameTrainingSet() throws Exceptio String firstJobId = "regression_two_jobs_with_same_randomize_seed_1"; String firstJobDestIndex = firstJobId + "_dest"; - BoostedTreeParams boostedTreeParams = new BoostedTreeParams(1.0, 1.0, 1.0, 1, 1.0); + BoostedTreeParams boostedTreeParams = BoostedTreeParams.builder() + .setLambda(1.0) + .setGamma(1.0) + .setEta(1.0) + .setFeatureBagFraction(1.0) + .setMaximumNumberTrees(1) + .build(); DataFrameAnalyticsConfig firstJob = buildAnalytics(firstJobId, sourceIndex, firstJobDestIndex, null, new Regression(DEPENDENT_VARIABLE_FIELD, boostedTreeParams, null, 50.0, null)); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelIT.java index 72c677d1aa40d..0aec6bc337412 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelIT.java @@ -6,10 +6,18 @@ package org.elasticsearch.xpack.ml.integration; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.ml.inference.TrainedModelConfig; +import org.elasticsearch.client.ml.inference.TrainedModelDefinition; +import org.elasticsearch.client.ml.inference.TrainedModelInput; +import org.elasticsearch.client.ml.inference.trainedmodel.TargetType; +import org.elasticsearch.client.ml.inference.trainedmodel.TrainedModel; +import org.elasticsearch.client.ml.inference.trainedmodel.ensemble.Ensemble; +import org.elasticsearch.client.ml.inference.trainedmodel.ensemble.WeightedSum; +import org.elasticsearch.client.ml.inference.trainedmodel.tree.Tree; +import org.elasticsearch.client.ml.inference.trainedmodel.tree.TreeNode; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -18,26 +26,19 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.license.License; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; -import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; -import org.elasticsearch.xpack.core.ml.inference.InferenceToXContentCompressor; import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModelTests; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; import org.junit.After; import java.io.IOException; -import java.time.Instant; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; @@ -62,22 +63,8 @@ protected boolean preserveTemplatesUponCompletion() { public void testGetTrainedModels() throws IOException { String modelId = "a_test_regression_model"; String modelId2 = "a_test_regression_model-2"; - Request model1 = new Request("PUT", - InferenceIndexConstants.LATEST_INDEX_NAME + "/_doc/" + modelId); - model1.setJsonEntity(buildRegressionModel(modelId)); - assertThat(client().performRequest(model1).getStatusLine().getStatusCode(), equalTo(201)); - - Request modelDefinition1 = new Request("PUT", - InferenceIndexConstants.LATEST_INDEX_NAME + "/_doc/" + TrainedModelDefinitionDoc.docId(modelId, 0)); - modelDefinition1.setJsonEntity(buildRegressionModelDefinitionDoc(modelId)); - assertThat(client().performRequest(modelDefinition1).getStatusLine().getStatusCode(), equalTo(201)); - - Request model2 = new Request("PUT", - InferenceIndexConstants.LATEST_INDEX_NAME + "/_doc/" + modelId2); - model2.setJsonEntity(buildRegressionModel(modelId2)); - assertThat(client().performRequest(model2).getStatusLine().getStatusCode(), equalTo(201)); - - adminClient().performRequest(new Request("POST", InferenceIndexConstants.LATEST_INDEX_NAME + "/_refresh")); + putRegressionModel(modelId); + putRegressionModel(modelId2); Response getModel = client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "inference/" + modelId)); @@ -164,17 +151,7 @@ public void testGetTrainedModels() throws IOException { public void testDeleteTrainedModels() throws IOException { String modelId = "test_delete_regression_model"; - Request model1 = new Request("PUT", - InferenceIndexConstants.LATEST_INDEX_NAME + "/_doc/" + modelId); - model1.setJsonEntity(buildRegressionModel(modelId)); - assertThat(client().performRequest(model1).getStatusLine().getStatusCode(), equalTo(201)); - - Request modelDefinition1 = new Request("PUT", - InferenceIndexConstants.LATEST_INDEX_NAME + "/_doc/" + 
TrainedModelDefinitionDoc.docId(modelId, 0)); - modelDefinition1.setJsonEntity(buildRegressionModelDefinitionDoc(modelId)); - assertThat(client().performRequest(modelDefinition1).getStatusLine().getStatusCode(), equalTo(201)); - - adminClient().performRequest(new Request("POST", InferenceIndexConstants.LATEST_INDEX_NAME + "/_refresh")); + putRegressionModel(modelId); Response delModel = client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "inference/" + modelId)); @@ -208,42 +185,68 @@ public void testGetPrePackagedModels() throws IOException { assertThat(response, containsString("\"definition\"")); } - private static String buildRegressionModel(String modelId) throws IOException { + private void putRegressionModel(String modelId) throws IOException { try(XContentBuilder builder = XContentFactory.jsonBuilder()) { + TrainedModelDefinition.Builder definition = new TrainedModelDefinition.Builder() + .setPreProcessors(Collections.emptyList()) + .setTrainedModel(buildRegression()); TrainedModelConfig.builder() + .setDefinition(definition) .setModelId(modelId) .setInput(new TrainedModelInput(Arrays.asList("col1", "col2", "col3"))) - .setCreatedBy("ml_test") - .setVersion(Version.CURRENT) - .setCreateTime(Instant.now()) - .setEstimatedOperations(0) - .setLicenseLevel(License.OperationMode.PLATINUM.description()) - .setEstimatedHeapMemory(0) - .build() - .toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"))); - return XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON); + .build().toXContent(builder, ToXContent.EMPTY_PARAMS); + Request model = new Request("PUT", "_ml/inference/" + modelId); + model.setJsonEntity(XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON)); + assertThat(client().performRequest(model).getStatusLine().getStatusCode(), equalTo(200)); } } - private static String buildRegressionModelDefinitionDoc(String modelId) throws IOException { - try(XContentBuilder builder = XContentFactory.jsonBuilder()) { - TrainedModelDefinition definition = new TrainedModelDefinition.Builder() - .setPreProcessors(Collections.emptyList()) - .setTrainedModel(LocalModelTests.buildRegression()) - .build(); - String compressedString = InferenceToXContentCompressor.deflate(definition); - TrainedModelDefinitionDoc doc = new TrainedModelDefinitionDoc.Builder().setDocNum(0) - .setCompressedString(compressedString) - .setTotalDefinitionLength(compressedString.length()) - .setDefinitionLength(compressedString.length()) - .setCompressionVersion(1) - .setModelId(modelId).build(); - doc.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"))); - return XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON); - } + private static TrainedModel buildRegression() { + List featureNames = Arrays.asList("field.foo", "field.bar", "animal_cat", "animal_dog"); + Tree tree1 = Tree.builder() + .setFeatureNames(featureNames) + .setNodes(TreeNode.builder(0) + .setLeftChild(1) + .setRightChild(2) + .setSplitFeature(0) + .setThreshold(0.5), + TreeNode.builder(1).setLeafValue(0.3), + TreeNode.builder(2) + .setThreshold(0.0) + .setSplitFeature(3) + .setLeftChild(3) + .setRightChild(4), + TreeNode.builder(3).setLeafValue(0.1), + TreeNode.builder(4).setLeafValue(0.2)) + .build(); + Tree tree2 = Tree.builder() + .setFeatureNames(featureNames) + .setNodes(TreeNode.builder(0) + .setLeftChild(1) + 
.setRightChild(2) + .setSplitFeature(2) + .setThreshold(1.0), + TreeNode.builder(1).setLeafValue(1.5), + TreeNode.builder(2).setLeafValue(0.9)) + .build(); + Tree tree3 = Tree.builder() + .setFeatureNames(featureNames) + .setNodes(TreeNode.builder(0) + .setLeftChild(1) + .setRightChild(2) + .setSplitFeature(1) + .setThreshold(0.2), + TreeNode.builder(1).setLeafValue(1.5), + TreeNode.builder(2).setLeafValue(0.9)) + .build(); + return Ensemble.builder() + .setTargetType(TargetType.REGRESSION) + .setFeatureNames(featureNames) + .setTrainedModels(Arrays.asList(tree1, tree2, tree3)) + .setOutputAggregator(new WeightedSum(Arrays.asList(0.5, 0.5, 0.5))) + .build(); } - @After public void clearMlState() throws Exception { new MlRestTestStateCleaner(logger, adminClient()).clearMlMetadata(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 5216cf6a267fc..b933ea65caed7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -113,6 +113,7 @@ import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; @@ -184,6 +185,7 @@ import org.elasticsearch.xpack.ml.action.TransportPutDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportPutFilterAction; import org.elasticsearch.xpack.ml.action.TransportPutJobAction; +import org.elasticsearch.xpack.ml.action.TransportPutTrainedModelAction; import org.elasticsearch.xpack.ml.action.TransportRevertModelSnapshotAction; import org.elasticsearch.xpack.ml.action.TransportSetUpgradeModeAction; import org.elasticsearch.xpack.ml.action.TransportStartDataFrameAnalyticsAction; @@ -276,6 +278,7 @@ import org.elasticsearch.xpack.ml.rest.inference.RestDeleteTrainedModelAction; import org.elasticsearch.xpack.ml.rest.inference.RestGetTrainedModelsAction; import org.elasticsearch.xpack.ml.rest.inference.RestGetTrainedModelsStatsAction; +import org.elasticsearch.xpack.ml.rest.inference.RestPutTrainedModelAction; import org.elasticsearch.xpack.ml.rest.job.RestCloseJobAction; import org.elasticsearch.xpack.ml.rest.job.RestDeleteForecastAction; import org.elasticsearch.xpack.ml.rest.job.RestDeleteJobAction; @@ -761,7 +764,8 @@ public List getRestHandlers(Settings settings, RestController restC new RestExplainDataFrameAnalyticsAction(restController), new RestGetTrainedModelsAction(restController), new RestDeleteTrainedModelAction(restController), - new RestGetTrainedModelsStatsAction(restController) + new RestGetTrainedModelsStatsAction(restController), + new RestPutTrainedModelAction(restController) ); } @@ -837,6 +841,7 @@ public List getRestHandlers(Settings settings, RestController restC new ActionHandler<>(GetTrainedModelsAction.INSTANCE, TransportGetTrainedModelsAction.class), new ActionHandler<>(DeleteTrainedModelAction.INSTANCE, TransportDeleteTrainedModelAction.class), new ActionHandler<>(GetTrainedModelsStatsAction.INSTANCE, TransportGetTrainedModelsStatsAction.class), + new 
ActionHandler<>(PutTrainedModelAction.INSTANCE, TransportPutTrainedModelAction.class), usageAction, infoAction); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index bc4bb6e16e53c..1ff0cf2208853 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; @@ -46,7 +47,7 @@ public class TransportDeleteExpiredDataAction extends HandledTransportAction { + + private final TrainedModelProvider trainedModelProvider; + private final XPackLicenseState licenseState; + private final NamedXContentRegistry xContentRegistry; + private final Client client; + + @Inject + public TransportPutTrainedModelAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, XPackLicenseState licenseState, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Client client, + TrainedModelProvider trainedModelProvider, NamedXContentRegistry xContentRegistry) { + super(PutTrainedModelAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, + indexNameExpressionResolver); + this.licenseState = licenseState; + this.trainedModelProvider = trainedModelProvider; + this.xContentRegistry = xContentRegistry; + this.client = client; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response read(StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void masterOperation(Task task, + PutTrainedModelAction.Request request, + ClusterState state, + ActionListener listener) { + try { + request.getTrainedModelConfig().ensureParsedDefinition(xContentRegistry); + request.getTrainedModelConfig().getModelDefinition().getTrainedModel().validate(); + } catch (IOException ex) { + listener.onFailure(ExceptionsHelper.badRequestException("Failed to parse definition for [{}]", + ex, + request.getTrainedModelConfig().getModelId())); + return; + } catch (ElasticsearchException ex) { + listener.onFailure(ExceptionsHelper.badRequestException("Definition for [{}] has validation failures.", + ex, + request.getTrainedModelConfig().getModelId())); + return; + } + + TrainedModelConfig trainedModelConfig = new TrainedModelConfig.Builder(request.getTrainedModelConfig()) + .setVersion(Version.CURRENT) + .setCreateTime(Instant.now()) + .setCreatedBy("api_user") + .setLicenseLevel(License.OperationMode.PLATINUM.description()) + .setEstimatedHeapMemory(request.getTrainedModelConfig().getModelDefinition().ramBytesUsed()) + .setEstimatedOperations(request.getTrainedModelConfig().getModelDefinition().getTrainedModel().estimatedNumOperations()) + .build(); + + ActionListener tagsModelIdCheckListener = ActionListener.wrap( + r -> trainedModelProvider.storeTrainedModel(trainedModelConfig, ActionListener.wrap( + storedConfig -> listener.onResponse(new 
PutTrainedModelAction.Response(trainedModelConfig)), + listener::onFailure + )), + listener::onFailure + ); + + ActionListener modelIdTagCheckListener = ActionListener.wrap( + r -> checkTagsAgainstModelIds(request.getTrainedModelConfig().getTags(), tagsModelIdCheckListener), + listener::onFailure + ); + + checkModelIdAgainstTags(request.getTrainedModelConfig().getModelId(), modelIdTagCheckListener); + } + + private void checkModelIdAgainstTags(String modelId, ActionListener listener) { + QueryBuilder builder = QueryBuilders.constantScoreQuery( + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(TrainedModelConfig.TAGS.getPreferredName(), modelId))); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(builder).size(0).trackTotalHitsUpTo(1); + SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN).source(sourceBuilder); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ML_ORIGIN, + searchRequest, + ActionListener.wrap( + response -> { + if (response.getHits().getTotalHits().value > 0) { + listener.onFailure( + ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.INFERENCE_MODEL_ID_AND_TAGS_UNIQUE, modelId))); + return; + } + listener.onResponse(null); + }, + listener::onFailure + ), + client::search); + } + + private void checkTagsAgainstModelIds(List tags, ActionListener listener) { + if (tags.isEmpty()) { + listener.onResponse(null); + return; + } + + QueryBuilder builder = QueryBuilders.constantScoreQuery( + QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery(TrainedModelConfig.MODEL_ID.getPreferredName(), tags))); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(builder).size(0).trackTotalHitsUpTo(1); + SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN).source(sourceBuilder); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ML_ORIGIN, + searchRequest, + ActionListener.wrap( + response -> { + if (response.getHits().getTotalHits().value > 0) { + listener.onFailure( + ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INFERENCE_TAGS_AND_MODEL_IDS_UNIQUE, tags))); + return; + } + listener.onResponse(null); + }, + listener::onFailure + ), + client::search); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + if (licenseState.isMachineLearningAllowed()) { + super.doExecute(task, request, listener); + } else { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING)); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java index 47ecb0ec2b6b8..ec3e192ab4ceb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndex.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSortConfig; +import org.elasticsearch.index.mapper.FieldAliasMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.search.sort.SortOrder; 
import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; @@ -38,6 +40,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; /** @@ -155,21 +158,36 @@ private static Integer findMaxSettingValue(GetSettingsResponse settingsResponse, return maxValue; } + @SuppressWarnings("unchecked") private static Map createAdditionalMappings(DataFrameAnalyticsConfig config, Map mappingsProperties) { Map properties = new HashMap<>(); - properties.put(ID_COPY, Map.of("type", "keyword")); + properties.put(ID_COPY, Map.of("type", KeywordFieldMapper.CONTENT_TYPE)); for (Map.Entry entry : config.getAnalysis().getExplicitlyMappedFields(config.getDest().getResultsField()).entrySet()) { String destFieldPath = entry.getKey(); String sourceFieldPath = entry.getValue(); - Object sourceFieldMapping = mappingsProperties.get(sourceFieldPath); - if (sourceFieldMapping != null) { + Object sourceFieldMapping = extractMapping(sourceFieldPath, mappingsProperties); + if (sourceFieldMapping instanceof Map) { + Map sourceFieldMappingAsMap = (Map) sourceFieldMapping; + // If the source field is an alias, fetch the concrete field that the alias points to. + if (FieldAliasMapper.CONTENT_TYPE.equals(sourceFieldMappingAsMap.get("type"))) { + String path = (String) sourceFieldMappingAsMap.get(FieldAliasMapper.Names.PATH); + sourceFieldMapping = extractMapping(path, mappingsProperties); + } + } + // We may have updated the value of {@code sourceFieldMapping} in the "if" block above. + // Hence, we need to check the "instanceof" condition again. + if (sourceFieldMapping instanceof Map) { properties.put(destFieldPath, sourceFieldMapping); } } return properties; } + private static Object extractMapping(String path, Map mappingsProperties) { + return extractValue(String.join("." 
+ PROPERTIES + ".", path.split("\\.")), mappingsProperties); + } + private static Map createMetaData(String analyticsId, Clock clock) { Map metadata = new HashMap<>(); metadata.put(CREATION_DATE_MILLIS, clock.millis()); @@ -227,4 +245,3 @@ private static void checkResultsFieldIsNotPresentInProperties(DataFrameAnalytics } } } - diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java index 00fea87a05b20..5168c9296d2f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask.ProgressTracker; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import org.elasticsearch.xpack.ml.dataframe.process.results.RowResults; @@ -173,7 +174,7 @@ private TrainedModelConfig createTrainedModelConfig(TrainedModelDefinition.Build .collect(toList()); return TrainedModelConfig.builder() .setModelId(modelId) - .setCreatedBy("data-frame-analytics") + .setCreatedBy(XPackUser.NAME) .setVersion(Version.CURRENT) .setCreateTime(createTime) .setTags(Collections.singletonList(analytics.getId())) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index 993de87b07193..7ae8004f14a25 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -174,10 +174,12 @@ private void storeTrainedModelAndDefinition(TrainedModelConfig trainedModelConfi r -> { assert r.getItems().length == 2; if (r.getItems()[0].isFailed()) { + logger.error(new ParameterizedMessage( "[{}] failed to store trained model config for inference", trainedModelConfig.getModelId()), r.getItems()[0].getFailure().getCause()); + wrappedListener.onFailure(r.getItems()[0].getFailure().getCause()); return; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java index 53526e2a4753d..2598177ce7c10 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.job.persistence; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -22,7 +22,7 @@ class BatchedBucketsIterator 
extends BatchedResultsIterator { - BatchedBucketsIterator(Client client, String jobId) { + BatchedBucketsIterator(OriginSettingClient client, String jobId) { super(client, jobId, Bucket.RESULT_TYPE_VALUE); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java index fe8bd3aaa3af7..35a88ed0f3e14 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.job.persistence; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -21,7 +21,7 @@ import java.io.InputStream; class BatchedInfluencersIterator extends BatchedResultsIterator { - BatchedInfluencersIterator(Client client, String jobId) { + BatchedInfluencersIterator(OriginSettingClient client, String jobId) { super(client, jobId, Influencer.RESULT_TYPE_VALUE); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java index 1b72c1901d9bb..f933769c9454f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedJobsIterator.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.job.persistence; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -23,7 +23,7 @@ public class BatchedJobsIterator extends BatchedDocumentsIterator { - public BatchedJobsIterator(Client client, String index) { + public BatchedJobsIterator(OriginSettingClient client, String index) { super(client, index); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java index 22c107f771ba5..989dd61c72d8b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.job.persistence; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -22,7 +22,7 @@ class BatchedRecordsIterator extends BatchedResultsIterator { - BatchedRecordsIterator(Client client, String jobId) { + 
BatchedRecordsIterator(OriginSettingClient client, String jobId) { super(client, jobId, AnomalyRecord.RESULT_TYPE_VALUE); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedResultsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedResultsIterator.java index 1c0fdbe08c9c6..61ca1dcc2c8af 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedResultsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedResultsIterator.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.persistence; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -16,7 +16,7 @@ public abstract class BatchedResultsIterator extends BatchedDocumentsIterator private final ResultsFilterBuilder filterBuilder; - public BatchedResultsIterator(Client client, String jobId, String resultType) { + public BatchedResultsIterator(OriginSettingClient client, String jobId, String resultType) { super(client, AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); this.filterBuilder = new ResultsFilterBuilder(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), resultType)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java index 65e8b75671151..4c147f3431b28 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.persistence; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -16,7 +16,7 @@ */ public class BatchedStateDocIdsIterator extends BatchedDocumentsIterator { - public BatchedStateDocIdsIterator(Client client, String index) { + public BatchedStateDocIdsIterator(OriginSettingClient client, String index) { super(client, index); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index b9359d2b97cd6..38e3e037ab024 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -130,7 +131,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static 
org.elasticsearch.xpack.core.ClientHelper.clientWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; public class JobResultsProvider { @@ -715,7 +715,7 @@ private void expandBuckets(String jobId, BucketsQueryBuilder query, QueryPage newBatchedBucketsIterator(String jobId) { - return new BatchedBucketsIterator(clientWithOrigin(client, ML_ORIGIN), jobId); + return new BatchedBucketsIterator(new OriginSettingClient(client, ML_ORIGIN), jobId); } /** @@ -727,7 +727,7 @@ public BatchedResultsIterator newBatchedBucketsIterator(String jobId) { * @return a record {@link BatchedResultsIterator} */ public BatchedResultsIterator newBatchedRecordsIterator(String jobId) { - return new BatchedRecordsIterator(clientWithOrigin(client, ML_ORIGIN), jobId); + return new BatchedRecordsIterator(new OriginSettingClient(client, ML_ORIGIN), jobId); } /** @@ -924,7 +924,7 @@ public void influencers(String jobId, InfluencersQuery query, Consumer newBatchedInfluencersIterator(String jobId) { - return new BatchedInfluencersIterator(clientWithOrigin(client, ML_ORIGIN), jobId); + return new BatchedInfluencersIterator(new OriginSettingClient(client, ML_ORIGIN), jobId); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index 2650f3018d951..c6e3fe9dbf670 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.job.retention; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -34,9 +34,9 @@ */ abstract class AbstractExpiredJobDataRemover implements MlDataRemover { - private final Client client; + private final OriginSettingClient client; - AbstractExpiredJobDataRemover(Client client) { + AbstractExpiredJobDataRemover(OriginSettingClient client) { this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index a80b00aaa0792..40611438fda59 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ThreadedActionListener; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -62,11 +62,11 @@ public class ExpiredForecastsRemover implements MlDataRemover { private static final int MAX_FORECASTS = 10000; private static final String RESULTS_INDEX_PATTERN = AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"; - 
private final Client client; + private final OriginSettingClient client; private final ThreadPool threadPool; private final long cutoffEpochMs; - public ExpiredForecastsRemover(Client client, ThreadPool threadPool) { + public ExpiredForecastsRemover(OriginSettingClient client, ThreadPool threadPool) { this.client = Objects.requireNonNull(client); this.threadPool = Objects.requireNonNull(threadPool); this.cutoffEpochMs = Instant.now(Clock.systemDefaultZone()).toEpochMilli(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 1153407d5125e..221f9d9debf87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -55,10 +55,10 @@ public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover */ private static final int MODEL_SNAPSHOT_SEARCH_SIZE = 10000; - private final Client client; + private final OriginSettingClient client; private final ThreadPool threadPool; - public ExpiredModelSnapshotsRemover(Client client, ThreadPool threadPool) { + public ExpiredModelSnapshotsRemover(OriginSettingClient client, ThreadPool threadPool) { super(client); this.client = Objects.requireNonNull(client); this.threadPool = Objects.requireNonNull(threadPool); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 6a17382db0e8c..fff2c23ab75a6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; @@ -46,10 +46,10 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { private static final Logger LOGGER = LogManager.getLogger(ExpiredResultsRemover.class); - private final Client client; + private final OriginSettingClient client; private final AnomalyDetectionAuditor auditor; - public ExpiredResultsRemover(Client client, AnomalyDetectionAuditor auditor) { + public ExpiredResultsRemover(OriginSettingClient client, AnomalyDetectionAuditor auditor) { super(client); this.client = Objects.requireNonNull(client); this.auditor = Objects.requireNonNull(auditor); diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java index 8a1d30382489f..cf1a9aaae4ede 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; @@ -49,10 +49,10 @@ public class UnusedStateRemover implements MlDataRemover { private static final Logger LOGGER = LogManager.getLogger(UnusedStateRemover.class); - private final Client client; + private final OriginSettingClient client; private final ClusterService clusterService; - public UnusedStateRemover(Client client, ClusterService clusterService) { + public UnusedStateRemover(OriginSettingClient client, ClusterService clusterService) { this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestPutTrainedModelAction.java new file mode 100644 index 0000000000000..cb3f4e0eddee2 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestPutTrainedModelAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.rest.inference; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.IOException; + +public class RestPutTrainedModelAction extends BaseRestHandler { + + public RestPutTrainedModelAction(RestController controller) { + controller.registerHandler(RestRequest.Method.PUT, + MachineLearning.BASE_PATH + "inference/{" + TrainedModelConfig.MODEL_ID.getPreferredName() + "}", + this); + } + + @Override + public String getName() { + return "xpack_ml_put_trained_model_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(TrainedModelConfig.MODEL_ID.getPreferredName()); + XContentParser parser = restRequest.contentParser(); + PutTrainedModelAction.Request putRequest = PutTrainedModelAction.Request.parseRequest(id, parser); + putRequest.timeout(restRequest.paramAsTime("timeout", putRequest.timeout())); + + return channel -> client.execute(PutTrainedModelAction.INSTANCE, putRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java index 119dcbdb42822..9b8c1345af1ea 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -34,14 +34,14 @@ public abstract class BatchedDocumentsIterator { private static final String CONTEXT_ALIVE_DURATION = "5m"; private static final int BATCH_SIZE = 10000; - private final Client client; + private final OriginSettingClient client; private final String index; private volatile long count; private volatile long totalHits; private volatile String scrollId; private volatile boolean isScrollInitialised; - protected BatchedDocumentsIterator(Client client, String index) { + protected BatchedDocumentsIterator(OriginSettingClient client, String index) { this.client = Objects.requireNonNull(client); this.index = Objects.requireNonNull(index); this.totalHits = 0; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/DocIdBatchedDocumentIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/DocIdBatchedDocumentIterator.java index 55b2cee2ff16d..3dcee716f11af 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/DocIdBatchedDocumentIterator.java 
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/DocIdBatchedDocumentIterator.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.utils.persistence; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchHit; @@ -18,7 +18,7 @@ public class DocIdBatchedDocumentIterator extends BatchedDocumentsIterator testCreateDestinationIndex(DataFrameAnalysis analysi doAnswer(callListenerOnResponse(getSettingsResponse)) .when(client).execute(eq(GetSettingsAction.INSTANCE), getSettingsRequestCaptor.capture(), any()); - Map index1Mappings = + Map indexMappings = Map.of( "properties", - Map.of("field_1", "field_1_mappings", "field_2", "field_2_mappings", DEPENDENT_VARIABLE, Map.of("type", "integer"))); - MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", index1Mappings); - - Map index2Mappings = - Map.of( - "properties", - Map.of("field_1", "field_1_mappings", "field_2", "field_2_mappings", DEPENDENT_VARIABLE, Map.of("type", "integer"))); - MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", index2Mappings); + Map.of( + "field_1", "field_1_mappings", + "field_2", "field_2_mappings", + NUMERICAL_FIELD, Map.of("type", "integer"), + OUTER_FIELD, Map.of("properties", Map.of(INNER_FIELD, Map.of("type", "integer"))), + ALIAS_TO_NUMERICAL_FIELD, Map.of("type", "alias", "path", NUMERICAL_FIELD), + ALIAS_TO_NESTED_FIELD, Map.of("type", "alias", "path", "outer-field.inner-field"))); + MappingMetaData index1MappingMetaData = new MappingMetaData("_doc", indexMappings); + MappingMetaData index2MappingMetaData = new MappingMetaData("_doc", indexMappings); ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); mappings.put("index_1", index1MappingMetaData); @@ -143,7 +148,9 @@ private Map testCreateDestinationIndex(DataFrameAnalysis analysi config, ActionListener.wrap( response -> {}, - e -> fail(e.getMessage()))); + e -> fail(e.getMessage()) + ) + ); GetSettingsRequest capturedGetSettingsRequest = getSettingsRequestCaptor.getValue(); assertThat(capturedGetSettingsRequest.indices(), equalTo(SOURCE_INDEX)); @@ -166,6 +173,10 @@ private Map testCreateDestinationIndex(DataFrameAnalysis analysi assertThat(extractValue("_doc.properties.ml__id_copy.type", map), equalTo("keyword")); assertThat(extractValue("_doc.properties.field_1", map), equalTo("field_1_mappings")); assertThat(extractValue("_doc.properties.field_2", map), equalTo("field_2_mappings")); + assertThat(extractValue("_doc.properties.numerical-field.type", map), equalTo("integer")); + assertThat(extractValue("_doc.properties.outer-field.properties.inner-field.type", map), equalTo("integer")); + assertThat(extractValue("_doc.properties.alias-to-numerical-field.type", map), equalTo("alias")); + assertThat(extractValue("_doc.properties.alias-to-nested-field.type", map), equalTo("alias")); assertThat(extractValue("_doc._meta.analytics", map), equalTo(ANALYTICS_ID)); assertThat(extractValue("_doc._meta.creation_date_in_millis", map), equalTo(CURRENT_TIME_MILLIS)); assertThat(extractValue("_doc._meta.created_by", map), equalTo(CREATED_BY)); @@ -178,13 +189,31 @@ public void testCreateDestinationIndex_OutlierDetection() throws IOException { } public void testCreateDestinationIndex_Regression() throws IOException { - Map map = testCreateDestinationIndex(new Regression(DEPENDENT_VARIABLE)); - assertThat(extractValue("_doc.properties.ml.dep_var_prediction.type", map), 
equalTo("integer")); + Map map = testCreateDestinationIndex(new Regression(NUMERICAL_FIELD)); + assertThat(extractValue("_doc.properties.ml.numerical-field_prediction.type", map), equalTo("integer")); } public void testCreateDestinationIndex_Classification() throws IOException { - Map map = testCreateDestinationIndex(new Classification(DEPENDENT_VARIABLE)); - assertThat(extractValue("_doc.properties.ml.dep_var_prediction.type", map), equalTo("integer")); + Map map = testCreateDestinationIndex(new Classification(NUMERICAL_FIELD)); + assertThat(extractValue("_doc.properties.ml.numerical-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("_doc.properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testCreateDestinationIndex_Classification_DependentVariableIsNested() throws IOException { + Map map = testCreateDestinationIndex(new Classification(OUTER_FIELD + "." + INNER_FIELD)); + assertThat(extractValue("_doc.properties.ml.outer-field.inner-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("_doc.properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testCreateDestinationIndex_Classification_DependentVariableIsAlias() throws IOException { + Map map = testCreateDestinationIndex(new Classification(ALIAS_TO_NUMERICAL_FIELD)); + assertThat(extractValue("_doc.properties.ml.alias-to-numerical-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("_doc.properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testCreateDestinationIndex_Classification_DependentVariableIsAliasToNested() throws IOException { + Map map = testCreateDestinationIndex(new Classification(ALIAS_TO_NESTED_FIELD)); + assertThat(extractValue("_doc.properties.ml.alias-to-nested-field_prediction.type", map), equalTo("integer")); assertThat(extractValue("_doc.properties.ml.top_classes.class_name.type", map), equalTo("integer")); } @@ -213,10 +242,15 @@ public void testCreateDestinationIndex_ResultsFieldsExistsInSourceIndex() { ); } - private Map testUpdateMappingsToDestIndex(DataFrameAnalysis analysis, - Map properties) throws IOException { + private Map testUpdateMappingsToDestIndex(DataFrameAnalysis analysis) throws IOException { DataFrameAnalyticsConfig config = createConfig(analysis); + Map properties = Map.of( + NUMERICAL_FIELD, Map.of("type", "integer"), + OUTER_FIELD, Map.of("properties", Map.of(INNER_FIELD, Map.of("type", "integer"))), + ALIAS_TO_NUMERICAL_FIELD, Map.of("type", "alias", "path", NUMERICAL_FIELD), + ALIAS_TO_NESTED_FIELD, Map.of("type", "alias", "path", OUTER_FIELD + "." 
+ INNER_FIELD) + ); ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); mappings.put("", new MappingMetaData("_doc", Map.of("properties", properties))); GetIndexResponse getIndexResponse = @@ -252,19 +286,35 @@ private Map testUpdateMappingsToDestIndex(DataFrameAnalysis anal } public void testUpdateMappingsToDestIndex_OutlierDetection() throws IOException { - testUpdateMappingsToDestIndex(new OutlierDetection.Builder().build(), Map.of(DEPENDENT_VARIABLE, Map.of("type", "integer"))); + testUpdateMappingsToDestIndex(new OutlierDetection.Builder().build()); } public void testUpdateMappingsToDestIndex_Regression() throws IOException { - Map map = - testUpdateMappingsToDestIndex(new Regression(DEPENDENT_VARIABLE), Map.of(DEPENDENT_VARIABLE, Map.of("type", "integer"))); - assertThat(extractValue("properties.ml.dep_var_prediction.type", map), equalTo("integer")); + Map map = testUpdateMappingsToDestIndex(new Regression(NUMERICAL_FIELD)); + assertThat(extractValue("properties.ml.numerical-field_prediction.type", map), equalTo("integer")); } public void testUpdateMappingsToDestIndex_Classification() throws IOException { - Map map = - testUpdateMappingsToDestIndex(new Classification(DEPENDENT_VARIABLE), Map.of(DEPENDENT_VARIABLE, Map.of("type", "integer"))); - assertThat(extractValue("properties.ml.dep_var_prediction.type", map), equalTo("integer")); + Map map = testUpdateMappingsToDestIndex(new Classification(NUMERICAL_FIELD)); + assertThat(extractValue("properties.ml.numerical-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testUpdateMappingsToDestIndex_Classification_DependentVariableIsNested() throws IOException { + Map map = testUpdateMappingsToDestIndex(new Classification(OUTER_FIELD + "." 
+ INNER_FIELD)); + assertThat(extractValue("properties.ml.outer-field.inner-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testUpdateMappingsToDestIndex_Classification_DependentVariableIsAlias() throws IOException { + Map map = testUpdateMappingsToDestIndex(new Classification(ALIAS_TO_NUMERICAL_FIELD)); + assertThat(extractValue("properties.ml.alias-to-numerical-field_prediction.type", map), equalTo("integer")); + assertThat(extractValue("properties.ml.top_classes.class_name.type", map), equalTo("integer")); + } + + public void testUpdateMappingsToDestIndex_Classification_DependentVariableIsAliasToNested() throws IOException { + Map map = testUpdateMappingsToDestIndex(new Classification(ALIAS_TO_NESTED_FIELD)); + assertThat(extractValue("properties.ml.alias-to-nested-field_prediction.type", map), equalTo("integer")); assertThat(extractValue("properties.ml.top_classes.class_name.type", map), equalTo("integer")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java index 036023eb8c9aa..09969817374cd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; +import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask.ProgressTracker; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import org.elasticsearch.xpack.ml.dataframe.process.results.RowResults; @@ -167,7 +168,7 @@ public void testProcess_GivenInferenceModelIsStoredSuccessfully() { assertThat(storedModel.getLicenseLevel(), equalTo(License.OperationMode.PLATINUM)); assertThat(storedModel.getModelId(), containsString(JOB_ID)); assertThat(storedModel.getVersion(), equalTo(Version.CURRENT)); - assertThat(storedModel.getCreatedBy(), equalTo("data-frame-analytics")); + assertThat(storedModel.getCreatedBy(), equalTo(XPackUser.NAME)); assertThat(storedModel.getTags(), contains(JOB_ID)); assertThat(storedModel.getDescription(), equalTo(JOB_DESCRIPTION)); assertThat(storedModel.getModelDefinition(), equalTo(inferenceModel.build())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java index faa1717cc4fde..512baa7624a68 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java @@ -56,7 +56,7 @@ public void testClassificationInfer() throws Exception { SingleValueInferenceResults result = getSingleValue(model, fields, new ClassificationConfig(0)); assertThat(result.value(), equalTo(0.0)); - assertThat(result.valueAsString(), is("0.0")); + assertThat(result.valueAsString(), is("0")); 
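The DestinationIndexTests assertions above encode the prediction-field naming these changes expect: the results field ml, then the dependent variable's full path, then a _prediction suffix, whether the dependent variable is a plain field, a nested field, or a field alias. A minimal illustrative sketch of that derivation, using only the test constants visible above (it is not the production mapping code):

    // Hedged sketch: how the expected prediction field name relates to the dependent variable.
    String resultsField = "ml";                               // results field used by these tests
    String dependentVariable = "outer-field.inner-field";     // OUTER_FIELD + "." + INNER_FIELD
    String predictionField = resultsField + "." + dependentVariable + "_prediction";
    // predictionField is "ml.outer-field.inner-field_prediction", matching
    // extractValue("properties.ml.outer-field.inner-field_prediction.type", map)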
ClassificationInferenceResults classificationResult = (ClassificationInferenceResults)getSingleValue(model, fields, new ClassificationConfig(1)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index dcddb842d21b6..cd58db91466d2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -416,7 +416,7 @@ private void setupJobAndDatafeed(String jobId, String datafeedId, TimeValue data GetJobsStatsAction.Response statsResponse = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet(); assertEquals(JobState.OPENED, statsResponse.getResponse().results().get(0).getState()); - }); + }, 20, TimeUnit.SECONDS); StartDatafeedAction.Request startDatafeedRequest = new StartDatafeedAction.Request(config.getId(), 0L); client().execute(StartDatafeedAction.INSTANCE, startDatafeedRequest).get(); @@ -435,7 +435,7 @@ private void run(String jobId, CheckedRunnable disrupt) throws Except setupJobAndDatafeed(jobId, "data_feed_id", TimeValue.timeValueSeconds(1)); waitForDatafeed(jobId, numDocs1); - client().admin().indices().prepareSyncedFlush().get(); + client().admin().indices().prepareFlush().get(); disrupt.run(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java index bfd92beefddad..a687124066d5c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java @@ -199,7 +199,6 @@ private static TrainedModelConfig.Builder buildTrainedModelConfigBuilder(String return TrainedModelConfig.builder() .setCreatedBy("ml_test") .setParsedDefinition(TrainedModelDefinitionTests.createRandomBuilder()) - .setDescription("trained model config for test") .setModelId(modelId) .setVersion(Version.CURRENT) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockBatchedDocumentsIterator.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockBatchedDocumentsIterator.java index c0d15cab49c16..701599d958ae5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockBatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockBatchedDocumentsIterator.java @@ -8,7 +8,9 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.ml.test.MockOriginSettingClient; import java.util.Deque; import java.util.List; @@ -25,7 +27,7 @@ public class MockBatchedDocumentsIterator extends BatchedResultsIterator { private Boolean requireIncludeInterim; public MockBatchedDocumentsIterator(List>> batches, String resultType) { - super(mock(Client.class), "foo", resultType); + super(MockOriginSettingClient.mockOriginSettingClient(mock(Client.class), ClientHelper.ML_ORIGIN), "foo", resultType); this.batches = batches; index = 0; 
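Because OriginSettingClient is final, the test changes above and below wrap a real OriginSettingClient around a mocked Client (via the MockOriginSettingClient helper added later in this diff) and stub Client.execute(action, request, listener) with doAnswer, instead of stubbing client.search(...).actionGet() as before. A minimal sketch of that stubbing pattern, with the SearchResponse assumed to be prepared elsewhere in the test:

    // Hedged sketch of the Mockito pattern these tests now use; not a complete test class.
    Client client = mock(Client.class);
    OriginSettingClient originSettingClient =
        MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN);
    doAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocation.getArguments()[2];
        listener.onResponse(searchResponse);   // assumed: a SearchResponse built by the test
        return null;
    }).when(client).execute(eq(SearchAction.INSTANCE), any(), any());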
wasTimeRangeCalled = false; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdaterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdaterTests.java index 9836cf93718e5..410c15e52c093 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdaterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdaterTests.java @@ -59,12 +59,12 @@ public class ScoresUpdaterTests extends ESTestCase { private Job job; private ScoresUpdater scoresUpdater; - private Bucket generateBucket(Date timestamp) throws IOException { + private Bucket generateBucket(Date timestamp) { return new Bucket(JOB_ID, timestamp, DEFAULT_BUCKET_SPAN); } @Before - public void setUpMocks() throws IOException { + public void setUpMocks() { MockitoAnnotations.initMocks(this); Job.Builder jobBuilder = new Job.Builder(JOB_ID); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java index c5a24fc9e0609..eb29ba06b17ca 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java @@ -6,10 +6,11 @@ package org.elasticsearch.xpack.ml.job.retention; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -17,8 +18,10 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import org.elasticsearch.xpack.ml.test.MockOriginSettingClient; import org.junit.Before; import java.io.IOException; @@ -32,6 +35,7 @@ import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -44,7 +48,7 @@ private class ConcreteExpiredJobDataRemover extends AbstractExpiredJobDataRemove private int getRetentionDaysCallCount = 0; - ConcreteExpiredJobDataRemover(Client client) { + ConcreteExpiredJobDataRemover(OriginSettingClient client) { super(client); } @@ -61,17 +65,30 @@ protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener toXContents) throws IOException { return createSearchResponse(toXContents, toXContents.size()); } + @SuppressWarnings("unchecked") + static void givenJobs(Client client, List jobs) throws IOException { + SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); + + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; 
+ listener.onResponse(response); + return null; + }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + } + private static SearchResponse createSearchResponse(List toXContents, int totalHits) throws IOException { SearchHit[] hitsArray = new SearchHit[toXContents.size()]; for (int i = 0; i < toXContents.size(); i++) { @@ -88,14 +105,10 @@ private static SearchResponse createSearchResponse(List to public void testRemoveGivenNoJobs() throws IOException { SearchResponse response = createSearchResponse(Collections.emptyList()); - - @SuppressWarnings("unchecked") - ActionFuture future = mock(ActionFuture.class); - when(future.actionGet()).thenReturn(response); - when(client.search(any())).thenReturn(future); + mockSearchResponse(response); TestListener listener = new TestListener(); - ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(originSettingClient); remover.remove(listener, () -> false); listener.waitToCompletion(); @@ -103,6 +116,7 @@ public void testRemoveGivenNoJobs() throws IOException { assertEquals(0, remover.getRetentionDaysCallCount); } + @SuppressWarnings("unchecked") public void testRemoveGivenMultipleBatches() throws IOException { // This is testing AbstractExpiredJobDataRemover.WrappedBatchedJobsIterator int totalHits = 7; @@ -126,13 +140,14 @@ public void testRemoveGivenMultipleBatches() throws IOException { AtomicInteger searchCount = new AtomicInteger(0); - @SuppressWarnings("unchecked") - ActionFuture future = mock(ActionFuture.class); - doAnswer(invocationOnMock -> responses.get(searchCount.getAndIncrement())).when(future).actionGet(); - when(client.search(any())).thenReturn(future); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(responses.get(searchCount.getAndIncrement())); + return null; + }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); TestListener listener = new TestListener(); - ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(originSettingClient); remover.remove(listener, () -> false); listener.waitToCompletion(); @@ -153,13 +168,10 @@ public void testRemoveGivenTimeOut() throws IOException { final int timeoutAfter = randomIntBetween(0, totalHits - 1); AtomicInteger attemptsLeft = new AtomicInteger(timeoutAfter); - @SuppressWarnings("unchecked") - ActionFuture future = mock(ActionFuture.class); - when(future.actionGet()).thenReturn(response); - when(client.search(any())).thenReturn(future); + mockSearchResponse(response); TestListener listener = new TestListener(); - ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(client); + ConcreteExpiredJobDataRemover remover = new ConcreteExpiredJobDataRemover(originSettingClient); remover.remove(listener, () -> (attemptsLeft.getAndDecrement() <= 0)); listener.waitToCompletion(); @@ -167,6 +179,15 @@ public void testRemoveGivenTimeOut() throws IOException { assertEquals(timeoutAfter, remover.getRetentionDaysCallCount); } + @SuppressWarnings("unchecked") + private void mockSearchResponse(SearchResponse searchResponse) { + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(searchResponse); + return null; + }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); + } + static class 
TestListener implements ActionListener { boolean success; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java index 56c2333cae016..6e332bf148d17 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemoverTests.java @@ -5,24 +5,25 @@ */ package org.elasticsearch.xpack.ml.job.retention; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.mock.orig.Mockito; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.test.MockOriginSettingClient; import org.junit.After; import org.junit.Before; import org.mockito.invocation.InvocationOnMock; @@ -33,21 +34,23 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.ml.job.retention.AbstractExpiredJobDataRemoverTests.TestListener; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Matchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; public class ExpiredModelSnapshotsRemoverTests extends ESTestCase { private Client client; + private OriginSettingClient originSettingClient; private ThreadPool threadPool; private List capturedSearchRequests; private List capturedDeleteModelSnapshotRequests; @@ -59,7 +62,10 @@ public void setUpTests() { capturedSearchRequests = new ArrayList<>(); capturedDeleteModelSnapshotRequests = new ArrayList<>(); searchResponsesPerCall = new ArrayList<>(); + client = mock(Client.class); + originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); + listener = new TestListener(); // Init thread pool @@ -76,8 +82,7 @@ public void shutdownThreadPool() { } public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { - givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + givenClientRequestsSucceed(Arrays.asList( JobTests.buildJobBuilder("foo").build(), JobTests.buildJobBuilder("bar").build() 
)); @@ -86,25 +91,22 @@ public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { listener.waitToCompletion(); assertThat(listener.success, is(true)); - verify(client).search(any()); - Mockito.verifyNoMoreInteractions(client); + verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); } public void testRemove_GivenJobWithoutActiveSnapshot() throws IOException { - givenClientRequestsSucceed(); - givenJobs(Collections.singletonList(JobTests.buildJobBuilder("foo").setModelSnapshotRetentionDays(7L).build())); + givenClientRequestsSucceed(Collections.singletonList(JobTests.buildJobBuilder("foo").setModelSnapshotRetentionDays(7L).build())); createExpiredModelSnapshotsRemover().remove(listener, () -> false); listener.waitToCompletion(); assertThat(listener.success, is(true)); - verify(client).search(any()); - Mockito.verifyNoMoreInteractions(client); + verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); } public void testRemove_GivenJobsWithMixedRetentionPolicies() throws IOException { - givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + givenClientRequestsSucceed( + Arrays.asList( JobTests.buildJobBuilder("none").build(), JobTests.buildJobBuilder("snapshots-1").setModelSnapshotRetentionDays(7L).setModelSnapshotId("active").build(), JobTests.buildJobBuilder("snapshots-2").setModelSnapshotRetentionDays(17L).setModelSnapshotId("active").build() @@ -140,8 +142,8 @@ public void testRemove_GivenJobsWithMixedRetentionPolicies() throws IOException } public void testRemove_GivenTimeout() throws IOException { - givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + givenClientRequestsSucceed( + Arrays.asList( JobTests.buildJobBuilder("snapshots-1").setModelSnapshotRetentionDays(7L).setModelSnapshotId("active").build(), JobTests.buildJobBuilder("snapshots-2").setModelSnapshotRetentionDays(17L).setModelSnapshotId("active").build() )); @@ -162,8 +164,8 @@ public void testRemove_GivenTimeout() throws IOException { } public void testRemove_GivenClientSearchRequestsFail() throws IOException { - givenClientSearchRequestsFail(); - givenJobs(Arrays.asList( + givenClientSearchRequestsFail( + Arrays.asList( JobTests.buildJobBuilder("none").build(), JobTests.buildJobBuilder("snapshots-1").setModelSnapshotRetentionDays(7L).setModelSnapshotId("active").build(), JobTests.buildJobBuilder("snapshots-2").setModelSnapshotRetentionDays(17L).setModelSnapshotId("active").build() @@ -188,8 +190,8 @@ public void testRemove_GivenClientSearchRequestsFail() throws IOException { } public void testRemove_GivenClientDeleteSnapshotRequestsFail() throws IOException { - givenClientDeleteModelSnapshotRequestsFail(); - givenJobs(Arrays.asList( + givenClientDeleteModelSnapshotRequestsFail( + Arrays.asList( JobTests.buildJobBuilder("none").build(), JobTests.buildJobBuilder("snapshots-1").setModelSnapshotRetentionDays(7L).setModelSnapshotId("active").build(), JobTests.buildJobBuilder("snapshots-2").setModelSnapshotRetentionDays(17L).setModelSnapshotId("active").build() @@ -216,59 +218,47 @@ public void testRemove_GivenClientDeleteSnapshotRequestsFail() throws IOExceptio assertThat(deleteSnapshotRequest.getSnapshotId(), equalTo("snapshots-1_1")); } - @SuppressWarnings("unchecked") - private void givenJobs(List jobs) throws IOException { - SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); - - ActionFuture future = mock(ActionFuture.class); - when(future.actionGet()).thenReturn(response); - when(client.search(any())).thenReturn(future); - } - private 
ExpiredModelSnapshotsRemover createExpiredModelSnapshotsRemover() { - return new ExpiredModelSnapshotsRemover(client, threadPool); + return new ExpiredModelSnapshotsRemover(originSettingClient, threadPool); } private static ModelSnapshot createModelSnapshot(String jobId, String snapshotId) { return new ModelSnapshot.Builder(jobId).setSnapshotId(snapshotId).build(); } -// private static SearchResponse createSearchResponse(List modelSnapshots) throws IOException { -// SearchHit[] hitsArray = new SearchHit[modelSnapshots.size()]; -// for (int i = 0; i < modelSnapshots.size(); i++) { -// hitsArray[i] = new SearchHit(randomInt()); -// XContentBuilder jsonBuilder = JsonXContent.contentBuilder(); -// modelSnapshots.get(i).toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); -// hitsArray[i].sourceRef(BytesReference.bytes(jsonBuilder)); -// } -// SearchHits hits = new SearchHits(hitsArray, new TotalHits(hitsArray.length, TotalHits.Relation.EQUAL_TO), 1.0f); -// SearchResponse searchResponse = mock(SearchResponse.class); -// when(searchResponse.getHits()).thenReturn(hits); -// return searchResponse; -// } - - private void givenClientRequestsSucceed() { - givenClientRequests(true, true); + private void givenClientRequestsSucceed(List jobs) throws IOException { + givenClientRequests(jobs, true, true); } - private void givenClientSearchRequestsFail() { - givenClientRequests(false, true); + private void givenClientSearchRequestsFail(List jobs) throws IOException { + givenClientRequests(jobs, false, true); } - private void givenClientDeleteModelSnapshotRequestsFail() { - givenClientRequests(true, false); + private void givenClientDeleteModelSnapshotRequestsFail(List jobs) throws IOException { + givenClientRequests(jobs, true, false); } @SuppressWarnings("unchecked") - private void givenClientRequests(boolean shouldSearchRequestsSucceed, boolean shouldDeleteSnapshotRequestsSucceed) { + private void givenClientRequests(List jobs, + boolean shouldSearchRequestsSucceed, boolean shouldDeleteSnapshotRequestsSucceed) throws IOException { + SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); + doAnswer(new Answer() { int callCount = 0; + AtomicBoolean isJobQuery = new AtomicBoolean(true); @Override public Void answer(InvocationOnMock invocationOnMock) { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + + if (isJobQuery.get()) { + listener.onResponse(response); + isJobQuery.set(false); + return null; + } + SearchRequest searchRequest = (SearchRequest) invocationOnMock.getArguments()[1]; capturedSearchRequests.add(searchRequest); - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; if (shouldSearchRequestsSucceed) { listener.onResponse(searchResponsesPerCall.get(callCount++)); } else { @@ -277,6 +267,7 @@ public Void answer(InvocationOnMock invocationOnMock) { return null; } }).when(client).execute(same(SearchAction.INSTANCE), any(), any()); + doAnswer(new Answer() { @Override public Void answer(InvocationOnMock invocationOnMock) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index f5acae02b4f87..b4c5a051fb8c1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -5,22 
+5,19 @@ */ package org.elasticsearch.xpack.ml.job.retention; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.mock.orig.Mockito; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; +import org.elasticsearch.xpack.ml.test.MockOriginSettingClient; import org.junit.Before; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -34,6 +31,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Matchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -43,6 +41,7 @@ public class ExpiredResultsRemoverTests extends ESTestCase { private Client client; + private OriginSettingClient originSettingClient; private List capturedDeleteByQueryRequests; private ActionListener listener; @@ -50,37 +49,26 @@ public class ExpiredResultsRemoverTests extends ESTestCase { @SuppressWarnings("unchecked") public void setUpTests() { capturedDeleteByQueryRequests = new ArrayList<>(); - client = mock(Client.class); - ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) throws Throwable { - capturedDeleteByQueryRequests.add((DeleteByQueryRequest) invocationOnMock.getArguments()[1]); - ActionListener listener = - (ActionListener) invocationOnMock.getArguments()[2]; - listener.onResponse(null); - return null; - } - }).when(client).execute(same(DeleteByQueryAction.INSTANCE), any(), any()); + + client = org.mockito.Mockito.mock(Client.class); + originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); } public void testRemove_GivenNoJobs() throws IOException { givenClientRequestsSucceed(); - givenJobs(Collections.emptyList()); + AbstractExpiredJobDataRemoverTests.givenJobs(client, Collections.emptyList()); createExpiredResultsRemover().remove(listener, () -> false); + verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); verify(listener).onResponse(true); - verify(client).search(any()); - Mockito.verifyNoMoreInteractions(client); } public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + AbstractExpiredJobDataRemoverTests.givenJobs(client, + Arrays.asList( JobTests.buildJobBuilder("foo").build(), JobTests.buildJobBuilder("bar").build() )); @@ 
-88,13 +76,13 @@ public void testRemove_GivenJobsWithoutRetentionPolicy() throws IOException { createExpiredResultsRemover().remove(listener, () -> false); verify(listener).onResponse(true); - verify(client).search(any()); - Mockito.verifyNoMoreInteractions(client); + verify(client).execute(eq(SearchAction.INSTANCE), any(), any()); } public void testRemove_GivenJobsWithAndWithoutRetentionPolicy() throws Exception { givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + AbstractExpiredJobDataRemoverTests.givenJobs(client, + Arrays.asList( JobTests.buildJobBuilder("none").build(), JobTests.buildJobBuilder("results-1").setResultsRetentionDays(10L).build(), JobTests.buildJobBuilder("results-2").setResultsRetentionDays(20L).build() @@ -112,7 +100,8 @@ public void testRemove_GivenJobsWithAndWithoutRetentionPolicy() throws Exception public void testRemove_GivenTimeout() throws Exception { givenClientRequestsSucceed(); - givenJobs(Arrays.asList( + AbstractExpiredJobDataRemoverTests.givenJobs(client, + Arrays.asList( JobTests.buildJobBuilder("results-1").setResultsRetentionDays(10L).build(), JobTests.buildJobBuilder("results-2").setResultsRetentionDays(20L).build() )); @@ -128,7 +117,8 @@ public void testRemove_GivenTimeout() throws Exception { public void testRemove_GivenClientRequestsFailed() throws IOException { givenClientRequestsFailed(); - givenJobs(Arrays.asList( + AbstractExpiredJobDataRemoverTests.givenJobs(client, + Arrays.asList( JobTests.buildJobBuilder("none").build(), JobTests.buildJobBuilder("results-1").setResultsRetentionDays(10L).build(), JobTests.buildJobBuilder("results-2").setResultsRetentionDays(20L).build() @@ -154,7 +144,7 @@ private void givenClientRequestsFailed() { private void givenClientRequests(boolean shouldSucceed) { doAnswer(new Answer() { @Override - public Void answer(InvocationOnMock invocationOnMock) throws Throwable { + public Void answer(InvocationOnMock invocationOnMock) { capturedDeleteByQueryRequests.add((DeleteByQueryRequest) invocationOnMock.getArguments()[1]); ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; @@ -170,16 +160,7 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { }).when(client).execute(same(DeleteByQueryAction.INSTANCE), any(), any()); } - @SuppressWarnings("unchecked") - private void givenJobs(List jobs) throws IOException { - SearchResponse response = AbstractExpiredJobDataRemoverTests.createSearchResponse(jobs); - - ActionFuture future = mock(ActionFuture.class); - when(future.actionGet()).thenReturn(response); - when(client.search(any())).thenReturn(future); - } - private ExpiredResultsRemover createExpiredResultsRemover() { - return new ExpiredResultsRemover(client, mock(AnomalyDetectionAuditor.class)); + return new ExpiredResultsRemover(originSettingClient, mock(AnomalyDetectionAuditor.class)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessResultsParserTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessResultsParserTests.java index dff432ab938d9..77428c2304961 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessResultsParserTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessResultsParserTests.java @@ -41,7 +41,7 @@ public void testParse_GivenUnknownObject() throws IOException { XContentParseException e = expectThrows(XContentParseException.class, () -> parser.parseResults(inputStream).forEachRemaining(a -> { })); - 
assertEquals("[1:3] [test_result] unknown field [unknown], parser not found", e.getMessage()); + assertEquals("[1:3] [test_result] unknown field [unknown]", e.getMessage()); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/MockOriginSettingClient.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/MockOriginSettingClient.java new file mode 100644 index 0000000000000..b47245a40b93a --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/MockOriginSettingClient.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ml.test; + + +import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * OriginSettingClient is a final class that cannot be mocked by mockito. + * The solution is to wrap a non-mocked OriginSettingClient around a + * mocked Client. All the mocking should take place on the client parameter. + */ +public class MockOriginSettingClient { + + /** + * Create a OriginSettingClient on a mocked client. + * + * @param client The mocked client + * @param origin Whatever + * @return A OriginSettingClient using a mocked client + */ + public static OriginSettingClient mockOriginSettingClient(Client client, String origin) { + + if (Mockito.mockingDetails(client).isMock() == false) { + throw new AssertionError("client should be a mock"); + } + ThreadContext tc = new ThreadContext(Settings.EMPTY); + + ThreadPool tp = mock(ThreadPool.class); + when(tp.getThreadContext()).thenReturn(tc); + + when(client.threadPool()).thenReturn(tp); + + return new OriginSettingClient(client, origin); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java index 381ff0612abe2..8373a75bfa117 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java @@ -6,12 +6,16 @@ package org.elasticsearch.xpack.ml.utils.persistence; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.search.ClearScrollRequestBuilder; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import 
org.elasticsearch.index.query.QueryBuilders; @@ -19,6 +23,8 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.ml.test.MockOriginSettingClient; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -30,9 +36,13 @@ import java.util.Deque; import java.util.List; import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -42,6 +52,7 @@ public class BatchedDocumentsIteratorTests extends ESTestCase { private static final String SCROLL_ID = "someScrollId"; private Client client; + private OriginSettingClient originSettingClient; private boolean wasScrollCleared; private TestIterator testIterator; @@ -52,8 +63,9 @@ public class BatchedDocumentsIteratorTests extends ESTestCase { @Before public void setUpMocks() { client = Mockito.mock(Client.class); + originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); wasScrollCleared = false; - testIterator = new TestIterator(client, INDEX_NAME); + testIterator = new TestIterator(originSettingClient, INDEX_NAME); givenClearScrollRequest(); } @@ -122,14 +134,14 @@ private String createJsonDoc(String value) { return "{\"foo\":\"" + value + "\"}"; } + @SuppressWarnings("unchecked") private void givenClearScrollRequest() { - ClearScrollRequestBuilder requestBuilder = mock(ClearScrollRequestBuilder.class); - when(client.prepareClearScroll()).thenReturn(requestBuilder); - when(requestBuilder.setScrollIds(Collections.singletonList(SCROLL_ID))).thenReturn(requestBuilder); - when(requestBuilder.get()).thenAnswer((invocation) -> { + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; wasScrollCleared = true; + listener.onResponse(mock(ClearScrollResponse.class)); return null; - }); + }).when(client).execute(eq(ClearScrollAction.INSTANCE), any(), any()); } private void assertSearchRequest() { @@ -156,6 +168,8 @@ private class ScrollResponsesMocker { private long totalHits = 0; private List responses = new ArrayList<>(); + private AtomicInteger responseIndex = new AtomicInteger(0); + ScrollResponsesMocker addBatch(String... 
hits) { totalHits += hits.length; batches.add(hits); @@ -173,33 +187,23 @@ void finishMock() { givenNextResponse(batches.get(i)); } if (responses.size() > 0) { - ActionFuture first = wrapResponse(responses.get(0)); - if (responses.size() > 1) { - List> rest = new ArrayList<>(); - for (int i = 1; i < responses.size(); ++i) { - rest.add(wrapResponse(responses.get(i))); - } - - when(client.searchScroll(searchScrollRequestCaptor.capture())).thenReturn( - first, rest.toArray(new ActionFuture[rest.size() - 1])); - } else { - when(client.searchScroll(searchScrollRequestCaptor.capture())).thenReturn(first); - } + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(responses.get(responseIndex.getAndIncrement())); + return null; + }).when(client).execute(eq(SearchScrollAction.INSTANCE), searchScrollRequestCaptor.capture(), any()); } } + @SuppressWarnings("unchecked") private void givenInitialResponse(String... hits) { SearchResponse searchResponse = createSearchResponseWithHits(hits); - ActionFuture future = wrapResponse(searchResponse); - when(future.actionGet()).thenReturn(searchResponse); - when(client.search(searchRequestCaptor.capture())).thenReturn(future); - } - @SuppressWarnings("unchecked") - private ActionFuture wrapResponse(SearchResponse searchResponse) { - ActionFuture future = mock(ActionFuture.class); - when(future.actionGet()).thenReturn(searchResponse); - return future; + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(searchResponse); + return null; + }).when(client).execute(eq(SearchAction.INSTANCE), searchRequestCaptor.capture(), any()); } private void givenNextResponse(String... hits) { @@ -224,7 +228,7 @@ private SearchHits createHits(String... 
values) { } private static class TestIterator extends BatchedDocumentsIterator { - TestIterator(Client client, String jobId) { + TestIterator(OriginSettingClient client, String jobId) { super(client, jobId); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index f7da3118c5e29..7dd46eb460cea 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -367,6 +367,7 @@ public void testToXContent() throws IOException { + "\"expiry_date\":\"2017-08-07T12:03:22.133Z\"," + "\"expiry_date_in_millis\":1502107402133," + "\"max_nodes\":2," + + "\"max_resource_units\":null," + "\"issued_to\":\"customer\"," + "\"issuer\":\"elasticsearch\"," + "\"start_date_in_millis\":-1" diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Literal.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Literal.java index 1813799aad19b..0064edc26740e 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Literal.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Literal.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypeConversion; import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.Objects; @@ -19,9 +18,9 @@ */ public class Literal extends LeafExpression { - public static final Literal TRUE = Literal.of(Source.EMPTY, Boolean.TRUE); - public static final Literal FALSE = Literal.of(Source.EMPTY, Boolean.FALSE); - public static final Literal NULL = Literal.of(Source.EMPTY, null); + public static final Literal TRUE = new Literal(Source.EMPTY, Boolean.TRUE, DataType.BOOLEAN); + public static final Literal FALSE = new Literal(Source.EMPTY, Boolean.FALSE, DataType.BOOLEAN); + public static final Literal NULL = new Literal(Source.EMPTY, null, DataType.NULL); private final Object value; private final DataType dataType; @@ -29,7 +28,7 @@ public class Literal extends LeafExpression { public Literal(Source source, Object value, DataType dataType) { super(source); this.dataType = dataType; - this.value = DataTypeConversion.convert(value, dataType); + this.value = value; } @Override diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/UnresolvedFunction.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/UnresolvedFunction.java index 7f0ee174554c1..f2a3deb60da02 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/UnresolvedFunction.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/UnresolvedFunction.java @@ -195,7 +195,7 @@ public UnresolvedFunction preprocessStar(UnresolvedFunction uf) { // dedicated count optimization if (uf.name.toUpperCase(Locale.ROOT).equals("COUNT")) { return new UnresolvedFunction(uf.source(), uf.name(), uf.resolutionType, - singletonList(Literal.of(uf.arguments().get(0).source(), Integer.valueOf(1)))); + singletonList(new 
Literal(uf.arguments().get(0).source(), Integer.valueOf(1), DataType.INTEGER))); } return uf; } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/In.java index 435a3447f46cc..e2bf7a59030c4 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/In.java @@ -74,14 +74,13 @@ public Nullability nullable() { @Override public boolean foldable() { return Expressions.foldable(children()) || - (Expressions.foldable(list) && list().stream().allMatch(e -> e.dataType() == DataType.NULL)); + (Expressions.foldable(list) && list().stream().allMatch(Expressions::isNull)); } @Override public Boolean fold() { // Optimization for early return and Query folding to LocalExec - if (value.dataType() == DataType.NULL || - list.size() == 1 && list.get(0).dataType() == DataType.NULL) { + if (Expressions.isNull(value) || list.size() == 1 && Expressions.isNull(list.get(0))) { return null; } return InProcessor.apply(value.fold(), Foldables.valuesOf(list, value.dataType())); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypeConversion.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypeConversion.java index cff0e13cfb846..e3f97fa7240e3 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypeConversion.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypeConversion.java @@ -86,14 +86,18 @@ public static DataType commonType(DataType left, DataType right) { // interval and dates if (left == DATE) { - if (right.isInterval()) { + if (right.isYearMonthInterval()) { return left; } + // promote + return DATETIME; } if (right == DATE) { - if (left.isInterval()) { + if (left.isYearMonthInterval()) { return right; } + // promote + return DATETIME; } if (left == TIME) { if (right == DATE) { diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/expression/LiteralTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/expression/LiteralTests.java index 01e0a2ba5f4f0..fca1ca0726a08 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/expression/LiteralTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/expression/LiteralTests.java @@ -51,7 +51,8 @@ static class ValueAndCompatibleTypes { public static Literal randomLiteral() { ValueAndCompatibleTypes gen = randomFrom(GENERATORS); - return new Literal(SourceTests.randomSource(), gen.valueSupplier.get(), randomFrom(gen.validDataTypes)); + DataType dataType = randomFrom(gen.validDataTypes); + return new Literal(SourceTests.randomSource(), DataTypeConversion.convert(gen.valueSupplier.get(), dataType), dataType); } @Override diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java index 836450cff5cf2..48fe6cdf22318 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java @@ -8,6 +8,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.common.Strings; +import 
org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ql.expression.Expression; @@ -52,10 +53,14 @@ import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Supplier; +import java.util.jar.JarEntry; +import java.util.jar.JarInputStream; import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; @@ -603,60 +608,93 @@ public > T makeNode(Class nodeClass) throws Excep * Cache of subclasses. We use a cache because it significantly speeds up * the test. */ - private static final Map, List> subclassCache = new HashMap<>(); + private static final Map, Set> subclassCache = new HashMap<>(); + /** * Find all subclasses of a particular class. */ - public static List> subclassesOf(Class clazz) throws IOException { + public static Set> subclassesOf(Class clazz) throws IOException { @SuppressWarnings("unchecked") // The map is built this way - List> lookup = (List>) subclassCache.get(clazz); + Set> lookup = (Set>) subclassCache.get(clazz); if (lookup != null) { return lookup; } - List> results = new ArrayList<>(); + Set> results = new LinkedHashSet<>(); String[] paths = System.getProperty("java.class.path").split(System.getProperty("path.separator")); for (String path: paths) { Path root = PathUtils.get(path); int rootLength = root.toString().length() + 1; - Files.walkFileTree(root, new SimpleFileVisitor() { - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { - String className = file.toString(); - // Chop off the root and file extension - className = className.substring(rootLength, className.length() - ".class".length()); - // Go from "path" style to class style - className = className.replace(PathUtils.getDefaultFileSystem().getSeparator(), "."); - - // filter the class that are not interested - // (and IDE folders like eclipse) - if (!className.startsWith("org.elasticsearch.xpack.ql") && !className.startsWith("org.elasticsearch.xpack.sql")) { - return FileVisitResult.CONTINUE; - } - - Class c; - try { - c = Class.forName(className); - } catch (ClassNotFoundException e) { - throw new IOException("Couldn't find " + file, e); - } - if (false == Modifier.isAbstract(c.getModifiers()) - && false == c.isAnonymousClass() - && clazz.isAssignableFrom(c)) { - Class s = c.asSubclass(clazz); - results.add(s); + // load classes from jar files + // NIO FileSystem API is not used since it trips the SecurityManager + // https://bugs.openjdk.java.net/browse/JDK-8160798 + // so iterate the jar "by hand" + if (path.endsWith(".jar") && path.contains("x-pack-ql")) { + try (JarInputStream jar = jarStream(root)) { + JarEntry je = null; + while ((je = jar.getNextJarEntry()) != null) { + String name = je.getName(); + if (name.endsWith(".class")) { + String className = name.substring(0, name.length() - ".class".length()).replace("/", "."); + maybeLoadClass(clazz, className, root + "!/" + name, results); } } - return FileVisitResult.CONTINUE; } - }); + } + // for folders, just use the FileSystems API + else { + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if 
(Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { + String fileName = file.toString(); + // Chop off the root and file extension + String className = fileName.substring(rootLength, fileName.length() - ".class".length()); + // Go from "path" style to class style + className = className.replace(PathUtils.getDefaultFileSystem().getSeparator(), "."); + maybeLoadClass(clazz, className, fileName, results); + } + return FileVisitResult.CONTINUE; + } + }); + } } subclassCache.put(clazz, results); return results; } + @SuppressForbidden(reason = "test reads from jar") + private static JarInputStream jarStream(Path path) throws IOException { + return new JarInputStream(path.toUri().toURL().openStream()); + } + + /** + * Load classes from predefined packages (hack to limit the scope) and if they match the hierarchy, add them to the cache + */ + private static void maybeLoadClass(Class clazz, String className, String location, Set> results) + throws IOException { + + // filter the class that are not interested + // (and IDE folders like eclipse) + if (className.startsWith("org.elasticsearch.xpack.ql") == false && className.startsWith("org.elasticsearch.xpack.sql") == false) { + return; + } + + Class c; + try { + c = Class.forName(className); + } catch (ClassNotFoundException e) { + throw new IOException("Couldn't load " + location, e); + } + + if (false == Modifier.isAbstract(c.getModifiers()) + && false == c.isAnonymousClass() + && clazz.isAssignableFrom(c)) { + Class s = c.asSubclass(clazz); + results.add(s); + } + } + /** * The test class for some subclass of node or {@code null} * if there isn't such a class or it doesn't extend diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/type/DataTypeConversionTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/type/DataTypeConversionTests.java index 77c5f097131fa..1258e05432b1f 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/type/DataTypeConversionTests.java @@ -640,8 +640,8 @@ public void testCommonType() { assertEquals(DATETIME, commonType(randomInterval(), DATETIME)); assertEquals(DATETIME, commonType(DATE, TIME)); assertEquals(DATETIME, commonType(TIME, DATE)); - assertEquals(DATE, commonType(DATE, randomInterval())); - assertEquals(DATE, commonType(randomInterval(), DATE)); + assertEquals(DATE, commonType(DATE, INTERVAL_YEAR)); + assertEquals(DATETIME, commonType(DATE, INTERVAL_HOUR_TO_MINUTE)); assertEquals(TIME, commonType(TIME, randomInterval())); assertEquals(TIME, commonType(randomInterval(), TIME)); diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java index 140e26c5e974c..9a8f4fbe38bd1 100644 --- a/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java +++ b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java @@ -123,11 +123,14 @@ public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOExcepti @Override public Scorer get(long leadCost) throws IOException { final Scorer innerScorer = innerScorerSupplier.get(leadCost); - // short-circuit if scores will not need capping - innerScorer.advanceShallow(0); - if (innerScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS) <= maxScore) { - return innerScorer; - } + 
// test scoreMode to avoid NPE - see https://github.com/elastic/elasticsearch/issues/51034 + if (scoreMode == ScoreMode.TOP_SCORES) { + // short-circuit if scores will not need capping + innerScorer.advanceShallow(0); + if (innerScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS) <= maxScore) { + return innerScorer; + } + } return new CappedScorer(innerWeight, innerScorer, maxScore); } diff --git a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java index 9f80e1c8ec91d..6aa69a2325a8f 100644 --- a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java +++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; @@ -141,6 +142,31 @@ public void testPinnedPromotions() throws Exception { } + /** + * Test scoring the entire set of documents, which uses a slightly different logic when creating scorers. + */ + public void testExhaustiveScoring() throws Exception { + assertAcked(prepareCreate("test") + .setMapping(jsonBuilder().startObject().startObject("_doc").startObject("properties") + .startObject("field1").field("analyzer", "whitespace").field("type", "text").endObject() + .startObject("field2").field("analyzer", "whitespace").field("type", "text").endObject() + .endObject().endObject().endObject()) + .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1))); + + client().prepareIndex("test").setId("1").setSource("field1", "foo").get(); + client().prepareIndex("test").setId("2").setSource("field1", "foo", "field2", "foo").get(); + + refresh(); + + QueryStringQueryBuilder organicQuery = QueryBuilders.queryStringQuery("foo"); + PinnedQueryBuilder pqb = new PinnedQueryBuilder(organicQuery, "2"); + SearchResponse searchResponse = client().prepareSearch().setQuery(pqb).setTrackTotalHits(true) + .setSearchType(DFS_QUERY_THEN_FETCH).get(); + + long numHits = searchResponse.getHits().getTotalHits().value; + assertThat(numHits, equalTo(2L)); + } + public void testExplain() throws Exception { assertAcked(prepareCreate("test").setMapping( jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field1") diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 82dda5ffa3998..116f089d70b46 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -19,6 +19,11 @@ dependencyLicenses { mapping from: /bc.*/, to: 'bouncycastle' } +forbiddenPatterns { + exclude '**/*.p12' + exclude '**/*.jks' +} + if (BuildParams.inFipsJvm) { test.enabled = false testingConventions.enabled = false diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java index bc2b27df58047..876fcfbf992fd 100644 --- 
a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java @@ -157,6 +157,14 @@ private static X509Certificate generateSignedCertificate(X500Principal principal throw new IllegalArgumentException("the certificate must be valid for at least one day"); } final ZonedDateTime notAfter = notBefore.plusDays(days); + return generateSignedCertificate(principal, subjectAltNames, keyPair, caCert, caPrivKey, isCa, notBefore, notAfter, + signatureAlgorithm); + } + + public static X509Certificate generateSignedCertificate(X500Principal principal, GeneralNames subjectAltNames, KeyPair keyPair, + X509Certificate caCert, PrivateKey caPrivKey, boolean isCa, + ZonedDateTime notBefore, ZonedDateTime notAfter, String signatureAlgorithm) + throws NoSuchAlgorithmException, CertIOException, OperatorCreationException, CertificateException { final BigInteger serial = CertGenUtils.getSerial(); JcaX509ExtensionUtils extUtils = new JcaX509ExtensionUtils(); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index e3a0f4e7112c4..ad5cfd5e05b1d 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -142,6 +142,7 @@ public static void main(String[] args) throws Exception { subcommands.put("csr", new SigningRequestCommand()); subcommands.put("cert", new GenerateCertificateCommand()); subcommands.put("ca", new CertificateAuthorityCommand()); + subcommands.put("http", new HttpCertificateCommand()); } @@ -920,8 +921,8 @@ static Collection parseFile(Path file) throws Exception } } - private static PEMEncryptor getEncrypter(char[] password) { - return new JcePEMEncryptorBuilder("DES-EDE3-CBC").setProvider(BC_PROV).build(password); + static PEMEncryptor getEncrypter(char[] password) { + return new JcePEMEncryptorBuilder("AES-128-CBC").setProvider(BC_PROV).build(password); } private static T withPassword(String description, char[] password, Terminal terminal, @@ -1036,7 +1037,7 @@ private static PrivateKey readPrivateKey(Path path, char[] password, Terminal te } } - private static GeneralNames getSubjectAlternativeNamesValue(List ipAddresses, List dnsNames, List commonNames) { + static GeneralNames getSubjectAlternativeNamesValue(List ipAddresses, List dnsNames, List commonNames) { Set generalNameList = new HashSet<>(); for (String ip : ipAddresses) { generalNameList.add(new GeneralName(GeneralName.iPAddress, ip)); @@ -1056,7 +1057,7 @@ private static GeneralNames getSubjectAlternativeNamesValue(List ipAddre return new GeneralNames(generalNameList.toArray(new GeneralName[0])); } - private static boolean isAscii(char[] str) { + static boolean isAscii(char[] str) { return ASCII_ENCODER.canEncode(CharBuffer.wrap(str)); } diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java new file mode 100644 index 0000000000000..604482ffb6f77 --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java @@ -0,0 +1,1201 @@ +/* + * Copyright Elasticsearch 
B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.cli; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.bouncycastle.asn1.DERIA5String; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.cert.CertIOException; +import org.bouncycastle.openssl.jcajce.JcaMiscPEMGenerator; +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; +import org.bouncycastle.operator.OperatorCreationException; +import org.bouncycastle.operator.OperatorException; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.bouncycastle.util.io.pem.PemObjectGenerator; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.PemUtils; + +import javax.security.auth.x500.X500Principal; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.time.Period; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import static org.elasticsearch.xpack.security.cli.CertGenUtils.generateSignedCertificate; + +/** + * This command is the "elasticsearch-certutil http" command. It provides a guided process for creating + * certificates or CSRs for the Rest (http/s) interface of Elasticsearch and configuring other stack products + * to trust this certificate. 
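 * A typical invocation (exact binary path assumed) is {@code bin/elasticsearch-certutil http}, which then walks through
 * the interactive prompts implemented below.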
+ */ +class HttpCertificateCommand extends EnvironmentAwareCommand { + + static final int DEFAULT_CERT_KEY_SIZE = 2048; + static final Period DEFAULT_CERT_VALIDITY = Period.ofYears(5); + + static final X500Principal DEFAULT_CA_NAME = new X500Principal("CN=Elasticsearch HTTP CA"); + static final int DEFAULT_CA_KEY_SIZE = DEFAULT_CERT_KEY_SIZE; + static final Period DEFAULT_CA_VALIDITY = DEFAULT_CERT_VALIDITY; + + private static final String ES_README_CSR = "es-readme-csr.txt"; + private static final String ES_YML_CSR = "es-sample-csr.yml"; + private static final String ES_README_P12 = "es-readme-p12.txt"; + private static final String ES_YML_P12 = "es-sample-p12.yml"; + private static final String CA_README_P12 = "ca-readme-p12.txt"; + private static final String KIBANA_README = "kibana-readme.txt"; + private static final String KIBANA_YML = "kibana-sample.yml"; + + /** + * Magic bytes for a non-empty PKCS#12 file + */ + private static final byte[] MAGIC_BYTES1_PKCS12 = new byte[] { (byte) 0x30, (byte) 0x82 }; + /** + * Magic bytes for an empty PKCS#12 file + */ + private static final byte[] MAGIC_BYTES2_PKCS12 = new byte[] { (byte) 0x30, (byte) 0x56 }; + /** + * Magic bytes for a JKS keystore + */ + private static final byte[] MAGIC_BYTES_JKS = new byte[] { (byte) 0xFE, (byte) 0xED }; + + enum FileType { + PKCS12, + JKS, + PEM_CERT, + PEM_KEY, + PEM_CERT_CHAIN, + UNRECOGNIZED; + } + + private class CertOptions { + final String name; + final X500Principal subject; + final List dnsNames; + final List ipNames; + final int keySize; + final Period validity; + + private CertOptions(String name, X500Principal subject, List dnsNames, List ipNames, int keySize, Period validity) { + this.name = name; + this.subject = subject; + this.dnsNames = dnsNames; + this.ipNames = ipNames; + this.keySize = keySize; + this.validity = validity; + } + } + + HttpCertificateCommand() { + super("generate a new certificate (or certificate request) for the Elasticsearch HTTP interface"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + printHeader("Elasticsearch HTTP Certificate Utility", terminal); + + terminal.println("The 'http' command guides you through the process of generating certificates"); + terminal.println("for use on the HTTP (Rest) interface for Elasticsearch."); + terminal.println(""); + terminal.println("This tool will ask you a number of questions in order to generate the right"); + terminal.println("set of files for your needs."); + + final CertificateTool.CAInfo caInfo; + final Period validity; + final boolean csr = askCertSigningRequest(terminal); + if (csr) { + caInfo = null; + validity = null; + } else { + final boolean existingCa = askExistingCertificateAuthority(terminal); + if (existingCa) { + caInfo = findExistingCA(terminal, env); + } else { + caInfo = createNewCA(terminal); + } + terminal.println(Terminal.Verbosity.VERBOSE, "Using the following CA:"); + terminal.println(Terminal.Verbosity.VERBOSE, "\tSubject: " + caInfo.certAndKey.cert.getSubjectX500Principal()); + terminal.println(Terminal.Verbosity.VERBOSE, "\tIssuer: " + caInfo.certAndKey.cert.getIssuerX500Principal()); + terminal.println(Terminal.Verbosity.VERBOSE, "\tSerial: " + caInfo.certAndKey.cert.getSerialNumber()); + terminal.println(Terminal.Verbosity.VERBOSE, "\tExpiry: " + caInfo.certAndKey.cert.getNotAfter()); + terminal.println(Terminal.Verbosity.VERBOSE, "\tSignature Algorithm: " + caInfo.certAndKey.cert.getSigAlgName()); + + validity = 
getCertificateValidityPeriod(terminal); + } + + final boolean multipleCertificates = askMultipleCertificates(terminal); + final List certificates = new ArrayList<>(); + + String nodeDescription = multipleCertificates ? "node #1" : "your nodes"; + while (true) { + final CertOptions cert = getCertificateConfiguration(terminal, multipleCertificates, nodeDescription, validity, csr); + terminal.println(Terminal.Verbosity.VERBOSE, "Generating the following " + (csr ? "CSR" : "Certificate") + ":"); + terminal.println(Terminal.Verbosity.VERBOSE, "\tName: " + cert.name); + terminal.println(Terminal.Verbosity.VERBOSE, "\tSubject: " + cert.subject); + terminal.println(Terminal.Verbosity.VERBOSE, "\tDNS Names: " + Strings.collectionToCommaDelimitedString(cert.dnsNames)); + terminal.println(Terminal.Verbosity.VERBOSE, "\tIP Names: " + Strings.collectionToCommaDelimitedString(cert.ipNames)); + terminal.println(Terminal.Verbosity.VERBOSE, "\tKey Size: " + cert.keySize); + terminal.println(Terminal.Verbosity.VERBOSE, "\tValidity: " + toString(cert.validity)); + certificates.add(cert); + + if (multipleCertificates && terminal.promptYesNo("Generate additional certificates?", true)) { + nodeDescription = "node #" + (certificates.size() + 1); + } else { + break; + } + } + + printHeader("What password do you want for your private key(s)?", terminal); + char[] password; + if (csr) { + terminal.println("Your private key(s) will be stored as a PEM formatted file."); + terminal.println("We recommend that you protect your private keys with a password"); + terminal.println(""); + terminal.println("If you do not wish to use a password, simply press at the prompt below."); + password = readPassword(terminal, "Provide a password for the private key: ", true); + } else { + terminal.println("Your private key(s) will be stored in a PKCS#12 keystore file named \"http.p12\"."); + terminal.println("This type of keystore is always password protected, but it is possible to use a"); + terminal.println("blank password."); + terminal.println(""); + terminal.println("If you wish to use a blank password, simply press at the prompt below."); + password = readPassword(terminal, "Provide a password for the \"http.p12\" file: ", true); + } + + printHeader("Where should we save the generated files?", terminal); + if (csr) { + terminal.println("A number of files will be generated including your private key(s),"); + terminal.println("certificate request(s), and sample configuration options for Elastic Stack products."); + } else { + terminal.println("A number of files will be generated including your private key(s),"); + terminal.println("public certificate(s), and sample configuration options for Elastic Stack products."); + } + terminal.println(""); + terminal.println("These files will be included in a single zip archive."); + terminal.println(""); + Path output = resolvePath("elasticsearch-ssl-http.zip"); + output = tryReadInput(terminal, "What filename should be used for the output zip file?", output, this::resolvePath); + + writeZip(output, password, caInfo, certificates, env); + terminal.println(""); + terminal.println("Zip file written to " + output); + } + + /** + * Resolve a filename as a Path (suppressing forbidden APIs). 
+ * Protected so tests can map String path-names to real path objects + */ + @SuppressForbidden(reason = "CLI tool resolves files against working directory") + protected Path resolvePath(String name) { + return PathUtils.get(name).normalize().toAbsolutePath(); + } + + private void writeZip(Path file, char[] password, CertificateTool.CAInfo caInfo, List certificates, + Environment env) throws UserException { + if (Files.exists(file)) { + throw new UserException(ExitCodes.IO_ERROR, "Output file '" + file + "' already exists"); + } + + boolean success = false; + try { + try (OutputStream fileStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW); + ZipOutputStream zipStream = new ZipOutputStream(fileStream, StandardCharsets.UTF_8)) { + + createZipDirectory(zipStream, "elasticsearch"); + if (certificates.size() == 1) { + writeCertificateAndKeyDetails(zipStream, "elasticsearch", certificates.get(0), caInfo, password, env); + } else { + for (CertOptions cert : certificates) { + final String dirName = "elasticsearch/" + cert.name; + createZipDirectory(zipStream, dirName); + writeCertificateAndKeyDetails(zipStream, dirName, cert, caInfo, password, env); + } + } + + if (caInfo != null && caInfo.generated) { + createZipDirectory(zipStream, "ca"); + writeCertificateAuthority(zipStream, "ca", caInfo, env); + } + + createZipDirectory(zipStream, "kibana"); + writeKibanaInfo(zipStream, "kibana", caInfo, env); + + /* TODO + createZipDirectory(zipStream, "beats"); + writeBeatsInfo(zipStream, "beats", caInfo); + + createZipDirectory(zipStream, "logstash"); + writeLogstashInfo(zipStream, "logstash", caInfo); + + createZipDirectory(zipStream, "lang-clients"); + writeLangClientInfo(zipStream, "lang-clients", caInfo); + + createZipDirectory(zipStream, "other"); + writeMiscellaneousInfo(zipStream, "other", caInfo); + */ + + // set permissions to 600 + PosixFileAttributeView view = Files.getFileAttributeView(file, PosixFileAttributeView.class); + if (view != null) { + view.setPermissions(Sets.newHashSet(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)); + } + + success = true; + } finally { + if (success == false) { + Files.deleteIfExists(file); + } + } + } catch (IOException e) { + throw new ElasticsearchException("Failed to write ZIP file '" + file + "'", e); + } + } + + private void createZipDirectory(ZipOutputStream zip, String name) throws IOException { + ZipEntry entry = new ZipEntry(name + "/"); + assert entry.isDirectory(); + zip.putNextEntry(entry); + } + + private void writeCertificateAndKeyDetails(ZipOutputStream zip, String dirName, CertOptions cert, CertificateTool.CAInfo ca, + char[] password, Environment env) { + // TODO : Should we add support for configuring PKI in ES? + try { + final KeyPair keyPair = CertGenUtils.generateKeyPair(cert.keySize); + final GeneralNames sanList = CertificateTool.getSubjectAlternativeNamesValue(cert.ipNames, cert.dnsNames, List.of()); + final boolean hasPassword = password != null && password.length > 0; + // TODO Add info to the READMEs so that the user could regenerate these certs if needed. + // (i.e. show them the certutil cert command that they would need). 
+ if (ca == null) { + // No local CA, generate a CSR instead + final PKCS10CertificationRequest csr = CertGenUtils.generateCSR(keyPair, cert.subject, sanList); + final String csrFile = "http-" + cert.name + ".csr"; + final String keyFile = "http-" + cert.name + ".key"; + final String certName = "http-" + cert.name + ".crt"; + final String ymlFile = "sample-elasticsearch.yml"; + final Map substitutions = buildSubstitutions(env, Map.ofEntries( + Map.entry("CSR", csrFile), + Map.entry("KEY", keyFile), + Map.entry("CERT", certName), + Map.entry("YML", ymlFile), + Map.entry("PASSWORD", hasPassword ? "*" : ""))); + writeTextFile(zip, dirName + "/README.txt", ES_README_CSR, substitutions); + writePemEntry(zip, dirName + "/" + csrFile, new JcaMiscPEMGenerator(csr)); + writePemEntry(zip, dirName + "/" + keyFile, generator(keyPair.getPrivate(), password)); + writeTextFile(zip, dirName + "/" + ymlFile, ES_YML_CSR, substitutions); + } else { + final ZonedDateTime notBefore = ZonedDateTime.now(ZoneOffset.UTC); + final ZonedDateTime notAfter = notBefore.plus(cert.validity); + Certificate certificate = CertGenUtils.generateSignedCertificate(cert.subject, sanList, keyPair, ca.certAndKey.cert, + ca.certAndKey.key, false, notBefore, notAfter, null); + + final String p12Name = "http.p12"; + final String ymlFile = "sample-elasticsearch.yml"; + final Map substitutions = buildSubstitutions(env, Map.ofEntries( + Map.entry("P12", p12Name), + Map.entry("YML", ymlFile), + Map.entry("PASSWORD", hasPassword ? "*" : ""))); + writeTextFile(zip, dirName + "/README.txt", ES_README_P12, substitutions); + writeKeyStore(zip, dirName + "/" + p12Name, certificate, keyPair.getPrivate(), password, ca.certAndKey.cert); + writeTextFile(zip, dirName + "/" + ymlFile, ES_YML_P12, substitutions); + } + } catch (OperatorException | IOException | GeneralSecurityException e) { + throw new ElasticsearchException("Failed to write certificate to ZIP file", e); + } + } + + private void writeCertificateAuthority(ZipOutputStream zip, String dirName, CertificateTool.CAInfo ca, Environment env) { + assert ca != null; + assert ca.generated; + + try { + writeTextFile(zip, dirName + "/README.txt", CA_README_P12, + buildSubstitutions(env, Map.of( + "P12", "ca.p12", + "DN", ca.certAndKey.cert.getSubjectX500Principal().getName(), + "PASSWORD", ca.password == null || ca.password.length == 0 ? "" : "*" + ))); + final KeyStore pkcs12 = KeyStore.getInstance("PKCS12"); + pkcs12.load(null); + pkcs12.setKeyEntry("ca", ca.certAndKey.key, ca.password, new Certificate[] { ca.certAndKey.cert }); + try (ZipEntryStream entry = new ZipEntryStream(zip, dirName + "/ca.p12")) { + pkcs12.store(entry, ca.password); + } + } catch (KeyStoreException | IOException | CertificateException | NoSuchAlgorithmException e) { + throw new ElasticsearchException("Failed to write CA to ZIP file", e); + } + } + + private void writeKibanaInfo(ZipOutputStream zip, String dirName, CertificateTool.CAInfo ca, Environment env) { + final String caCertName = "elasticsearch-ca.pem"; + final String caCert = ca == null ? "" : caCertName; + final String ymlFile = "sample-kibana.yml"; + + final Map substitutions = buildSubstitutions(env, Map.ofEntries( + Map.entry("CA_CERT_NAME", caCertName), + Map.entry("CA_CERT", caCert), + Map.entry("YML", ymlFile) + )); + + // TODO : Should we add support for client certs from Kibana to ES? 
+ + try { + writeTextFile(zip, dirName + "/README.txt", KIBANA_README, substitutions); + if (ca != null) { + writePemEntry(zip, dirName + "/" + caCert, new JcaMiscPEMGenerator(ca.certAndKey.cert)); + } + writeTextFile(zip, dirName + "/" + ymlFile, KIBANA_YML, substitutions); + } catch (IOException e) { + throw new ElasticsearchException("Failed to write Kibana details ZIP file", e); + } + } + + /** + * Loads {@code resource} from the classpath, performs variable substitution on it, and then writes it to {@code writer}. + */ + private void writeTextFile(ZipOutputStream zip, String outputName, String resource, Map substitutions) { + try (InputStream stream = getClass().getResourceAsStream("certutil-http/" + resource); + ZipEntryStream entry = new ZipEntryStream(zip, outputName); + PrintWriter writer = new PrintWriter(entry, false, StandardCharsets.UTF_8)) { + if (stream == null) { + throw new IllegalStateException("Cannot find internal resource " + resource); + } + copyWithSubstitutions(stream, writer, substitutions); + writer.flush(); + } catch (IOException e) { + throw new UncheckedIOException("Cannot add resource " + resource + " to zip file", e); + } + } + + /** + * Copies the input stream to the writer, while performing variable substitutions. + * The variable substitution processing supports 2 constructs + *
+ * 1. For each key in @{code substitutions}, any sequence of ${key} in the input is replaced with the
+ * substitution value.
+ * 2. Any line in the input that has the form #if KEY causes the following block to be output
+ * if-only-if KEY exists with a non-empty value in {@code substitutions}.
+ * A block is terminated with {@code #endif}. Lines with {@code #else} are also supported. Nested blocks are not supported.
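 * For example (illustrative values), given a substitutions map of {"P12": "http.p12", "PASSWORD": ""}, an input line
 *   keystore.path: ${P12}
 * becomes "keystore.path: http.p12", while any block between "#if PASSWORD" and "#endif" is skipped because the
 * PASSWORD value is empty.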
    + */ + static void copyWithSubstitutions(InputStream stream, PrintWriter writer, Map substitutions) throws IOException { + boolean skip = false; + for (String line : Streams.readAllLines(stream)) { + for (Map.Entry subst : substitutions.entrySet()) { + line = line.replace("${" + subst.getKey() + "}", subst.getValue()); + } + if (line.startsWith("#if ")) { + final String key = line.substring(4).trim(); + skip = Strings.isNullOrEmpty(substitutions.get(key)); + continue; + } else if (line.equals("#else")) { + skip = !skip; + continue; + } else if (line.equals("#endif")) { + skip = false; + continue; + } else if (skip) { + continue; + } + writer.println(line); + } + } + + private Map buildSubstitutions(Environment env, Map entries) { + final Map map = new HashMap<>(entries.size() + 4); + ZonedDateTime now = ZonedDateTime.now().withNano(0); + map.put("DATE", now.format(DateTimeFormatter.ISO_LOCAL_DATE)); + map.put("TIME", now.format(DateTimeFormatter.ISO_OFFSET_TIME)); + map.put("VERSION", Version.CURRENT.toString()); + map.put("CONF_DIR", env.configFile().toAbsolutePath().toString()); + map.putAll(entries); + return map; + } + + private void writeKeyStore(ZipOutputStream zip, String name, Certificate certificate, PrivateKey key, char[] password, + X509Certificate caCert) throws IOException, GeneralSecurityException { + final KeyStore pkcs12 = KeyStore.getInstance("PKCS12"); + pkcs12.load(null); + pkcs12.setKeyEntry("http", key, password, new Certificate[] { certificate }); + if (caCert != null) { + pkcs12.setCertificateEntry("ca", caCert); + } + try (ZipEntryStream entry = new ZipEntryStream(zip, name)) { + pkcs12.store(entry, password); + } + } + + private void writePemEntry(ZipOutputStream zip, String name, PemObjectGenerator generator) throws IOException { + try (ZipEntryStream entry = new ZipEntryStream(zip, name); + JcaPEMWriter pem = new JcaPEMWriter(new OutputStreamWriter(entry, StandardCharsets.UTF_8))) { + pem.writeObject(generator); + pem.flush(); + } + } + + private JcaMiscPEMGenerator generator(PrivateKey privateKey, char[] password) throws IOException { + if (password == null || password.length == 0) { + return new JcaMiscPEMGenerator(privateKey); + } + return new JcaMiscPEMGenerator(privateKey, CertificateTool.getEncrypter(password)); + } + + private Period getCertificateValidityPeriod(Terminal terminal) { + printHeader("How long should your certificates be valid?", terminal); + terminal.println("Every certificate has an expiry date. When the expiry date is reached clients"); + terminal.println("will stop trusting your certificate and TLS connections will fail."); + terminal.println(""); + terminal.println("Best practice suggests that you should either:"); + terminal.println("(a) set this to a short duration (90 - 120 days) and have automatic processes"); + terminal.println("to generate a new certificate before the old one expires, or"); + terminal.println("(b) set it to a longer duration (3 - 5 years) and then perform a manual update"); + terminal.println("a few months before it expires."); + terminal.println(""); + terminal.println("You may enter the validity period in years (e.g. 3Y), months (e.g. 18M), or days (e.g. 
90D)"); + terminal.println(""); + + return readPeriodInput(terminal, "For how long should your certificate be valid?", DEFAULT_CERT_VALIDITY, 60); + } + + private boolean askMultipleCertificates(Terminal terminal) { + printHeader("Do you wish to generate one certificate per node?", terminal); + terminal.println("If you have multiple nodes in your cluster, then you may choose to generate a"); + terminal.println("separate certificate for each of these nodes. Each certificate will have its"); + terminal.println("own private key, and will be issued for a specific hostname or IP address."); + terminal.println(""); + terminal.println("Alternatively, you may wish to generate a single certificate that is valid"); + terminal.println("across all the hostnames or addresses in your cluster."); + terminal.println(""); + terminal.println("If all of your nodes will be accessed through a single domain"); + terminal.println("(e.g. node01.es.example.com, node02.es.example.com, etc) then you may find it"); + terminal.println("simpler to generate one certificate with a wildcard hostname (*.es.example.com)"); + terminal.println("and use that across all of your nodes."); + terminal.println(""); + terminal.println("However, if you do not have a common domain name, and you expect to add"); + terminal.println("additional nodes to your cluster in the future, then you should generate a"); + terminal.println("certificate per node so that you can more easily generate new certificates when"); + terminal.println("you provision new nodes."); + terminal.println(""); + return terminal.promptYesNo("Generate a certificate per node?", false); + } + + private CertOptions getCertificateConfiguration(Terminal terminal, boolean multipleCertificates, String nodeDescription, + Period validity, boolean csr) { + + String certName = null; + if (multipleCertificates) { + printHeader("What is the name of " + nodeDescription + "?", terminal); + terminal.println("This name will be used as part of the certificate file name, and as a"); + terminal.println("descriptive name within the certificate."); + terminal.println(""); + terminal.println("You can use any descriptive name that you like, but we recommend using the name"); + terminal.println("of the Elasticsearch node."); + terminal.println(""); + certName = terminal.readText(nodeDescription + " name: "); + nodeDescription = certName; + } + + printHeader("Which hostnames will be used to connect to " + nodeDescription + "?", terminal); + terminal.println("These hostnames will be added as \"DNS\" names in the \"Subject Alternative Name\""); + terminal.println("(SAN) field in your certificate."); + terminal.println(""); + terminal.println("You should list every hostname and variant that people will use to connect to"); + terminal.println("your cluster over http."); + terminal.println("Do not list IP addresses here, you will be asked to enter them later."); + terminal.println(""); + terminal.println("If you wish to use a wildcard certificate (for example *.es.example.com) you"); + terminal.println("can enter that here."); + + final List dnsNames = new ArrayList<>(); + while (true) { + terminal.println(""); + terminal.println("Enter all the hostnames that you need, one per line." 
); + terminal.println("When you are done, press once more to move on to the next step."); + terminal.println(""); + + dnsNames.addAll(readMultiLineInput(terminal, this::validateHostname)); + if (dnsNames.isEmpty()) { + terminal.println(Terminal.Verbosity.SILENT, "You did not enter any hostnames."); + terminal.println("Clients are likely to encounter TLS hostname verification errors if they"); + terminal.println("connect to your cluster using a DNS name."); + } else { + terminal.println(Terminal.Verbosity.SILENT, "You entered the following hostnames."); + terminal.println(Terminal.Verbosity.SILENT, ""); + dnsNames.forEach(s -> terminal.println(Terminal.Verbosity.SILENT, " - " + s)); + } + terminal.println(""); + if (terminal.promptYesNo("Is this correct", true)) { + break; + } else { + dnsNames.clear(); + } + } + + printHeader("Which IP addresses will be used to connect to " + nodeDescription + "?", terminal); + terminal.println("If your clients will ever connect to your nodes by numeric IP address, then you"); + terminal.println("can list these as valid IP \"Subject Alternative Name\" (SAN) fields in your"); + terminal.println("certificate."); + terminal.println(""); + terminal.println("If you do not have fixed IP addresses, or not wish to support direct IP access"); + terminal.println("to your cluster then you can just press to skip this step."); + + final List ipNames = new ArrayList<>(); + while (true) { + terminal.println(""); + terminal.println("Enter all the IP addresses that you need, one per line."); + terminal.println("When you are done, press once more to move on to the next step."); + terminal.println(""); + + ipNames.addAll(readMultiLineInput(terminal, this::validateIpAddress)); + if (ipNames.isEmpty()) { + terminal.println(Terminal.Verbosity.SILENT, "You did not enter any IP addresses."); + } else { + terminal.println(Terminal.Verbosity.SILENT, "You entered the following IP addresses."); + terminal.println(Terminal.Verbosity.SILENT, ""); + ipNames.forEach(s -> terminal.println(Terminal.Verbosity.SILENT, " - " + s)); + } + terminal.println(""); + if (terminal.promptYesNo("Is this correct", true)) { + break; + } else { + ipNames.clear(); + } + } + + printHeader("Other certificate options", terminal); + terminal.println("The generated certificate will have the following additional configuration"); + terminal.println("values. These values have been selected based on a combination of the"); + terminal.println("information you have provided above and secure defaults. 
You should not need to"); + terminal.println("change these values unless you have specific requirements."); + terminal.println(""); + + if (certName == null) { + certName = dnsNames.stream().filter(n -> n.indexOf('*') == -1).findFirst() + .or(() -> dnsNames.stream().map(s -> s.replace("*.", "")).findFirst()) + .orElse("elasticsearch"); + } + X500Principal dn = buildDistinguishedName(certName); + int keySize = DEFAULT_CERT_KEY_SIZE; + while (true) { + terminal.println(Terminal.Verbosity.SILENT, "Key Name: " + certName); + terminal.println(Terminal.Verbosity.SILENT, "Subject DN: " + dn); + terminal.println(Terminal.Verbosity.SILENT, "Key Size: " + keySize); + terminal.println(Terminal.Verbosity.SILENT, ""); + if (terminal.promptYesNo("Do you wish to change any of these options?", false) == false) { + break; + } + + printHeader("What should your key be named?", terminal); + if (csr) { + terminal.println("This will be included in the name of the files that are generated"); + } else { + terminal.println("This will be the entry name in the PKCS#12 keystore that is generated"); + } + terminal.println("It is helpful to have a meaningful name for this key"); + terminal.println(""); + certName = tryReadInput(terminal, "Key Name", certName, Function.identity()); + + printHeader("What subject DN should be used for your certificate?", terminal); + terminal.println("This will be visible to clients."); + terminal.println("It is helpful to have a meaningful name for each certificate"); + terminal.println(""); + dn = tryReadInput(terminal, "Subject DN", dn, name -> { + try { + if (name.contains("=")) { + return new X500Principal(name); + } else { + return new X500Principal("CN=" + name); + } + } catch (IllegalArgumentException e) { + terminal.println(Terminal.Verbosity.SILENT, "'" + name + "' is not a valid DN (" + e.getMessage() + ")"); + return null; + } + }); + + printHeader("What key size should your certificate have?", terminal); + terminal.println("The RSA private key for your certificate has a fixed 'key size' (in bits)."); + terminal.println("Larger key sizes are generally more secure, but are also slower."); + terminal.println(""); + terminal.println("We recommend that you use one of 2048, 3072 or 4096 bits for your key."); + + keySize = readKeySize(terminal, keySize); + terminal.println(""); + } + + return new CertOptions(certName, dn, dnsNames, ipNames, keySize, validity); + } + + private String validateHostname(String name) { + if (DERIA5String.isIA5String(name)) { + return null; + } else { + return name + " is not a valid DNS name"; + } + } + + private String validateIpAddress(String ip) { + if (InetAddresses.isInetAddress(ip)) { + return null; + } else { + return ip + " is not a valid IP address"; + } + } + + private X500Principal buildDistinguishedName(String name) { + return new X500Principal("CN=" + name.replace(".", ",DC=")); + } + + private List readMultiLineInput(Terminal terminal, Function validator) { + final List lines = new ArrayList<>(); + while (true) { + String input = terminal.readText(""); + if (Strings.isEmpty(input)) { + break; + } else { + final String error = validator.apply(input); + if (error == null) { + lines.add(input); + } else { + terminal.println("Error: " + error); + } + } + } + return lines; + } + + + private boolean askCertSigningRequest(Terminal terminal) { + printHeader("Do you wish to generate a Certificate Signing Request (CSR)?", terminal); + + terminal.println("A CSR is used when you want your certificate to be created by an existing"); + 
terminal.println("Certificate Authority (CA) that you do not control (that is, you don't have"); + terminal.println("access to the keys for that CA). "); + terminal.println(""); + terminal.println("If you are in a corporate environment with a central security team, then you"); + terminal.println("may have an existing Corporate CA that can generate your certificate for you."); + terminal.println("Infrastructure within your organisation may already be configured to trust this"); + terminal.println("CA, so it may be easier for clients to connect to Elasticsearch if you use a"); + terminal.println("CSR and send that request to the team that controls your CA."); + terminal.println(""); + terminal.println("If you choose not to generate a CSR, this tool will generate a new certificate"); + terminal.println("for you. That certificate will be signed by a CA under your control. This is a"); + terminal.println("quick and easy way to secure your cluster with TLS, but you will need to"); + terminal.println("configure all your clients to trust that custom CA."); + + terminal.println(""); + return terminal.promptYesNo("Generate a CSR?", false); + } + + private CertificateTool.CAInfo findExistingCA(Terminal terminal, Environment env) throws UserException { + printHeader("What is the path to your CA?", terminal); + + terminal.println("Please enter the full pathname to the Certificate Authority that you wish to"); + terminal.println("use for signing your new http certificate. This can be in PKCS#12 (.p12), JKS"); + terminal.println("(.jks) or PEM (.crt, .key, .pem) format."); + + final Path caPath = requestPath("CA Path: ", terminal, env, true); + final FileType fileType = guessFileType(caPath, terminal); + switch (fileType) { + + case PKCS12: + case JKS: + terminal.println(Terminal.Verbosity.VERBOSE, "CA file " + caPath + " appears to be a " + fileType + " keystore"); + return readKeystoreCA(caPath, fileType, terminal); + + case PEM_KEY: + printHeader("What is the path to your CA certificate?", terminal); + terminal.println(caPath + " appears to be a PEM formatted private key file."); + terminal.println("In order to use it for signing we also need access to the certificate"); + terminal.println("that corresponds to that key."); + terminal.println(""); + final Path caCertPath = requestPath("CA Certificate: ", terminal, env, true); + return readPemCA(caCertPath, caPath, terminal); + + case PEM_CERT: + printHeader("What is the path to your CA key?", terminal); + terminal.println(caPath + " appears to be a PEM formatted certificate file."); + terminal.println("In order to use it for signing we also need access to the private key"); + terminal.println("that corresponds to that certificate."); + terminal.println(""); + final Path caKeyPath = requestPath("CA Key: ", terminal, env, true); + return readPemCA(caPath, caKeyPath, terminal); + + case PEM_CERT_CHAIN: + terminal.println(Terminal.Verbosity.SILENT, "The file at " + caPath + " contains multiple certificates."); + terminal.println("That type of file typically represents a certificate-chain"); + terminal.println("This tool requires a single certificate for the CA"); + throw new UserException(ExitCodes.DATA_ERROR, caPath + ": Unsupported file type (certificate chain)"); + + + case UNRECOGNIZED: + default: + terminal.println(Terminal.Verbosity.SILENT, "The file at " + caPath + " isn't a file type that this tool recognises."); + terminal.println("Please try again with a CA in PKCS#12, JKS or PEM format"); + throw new UserException(ExitCodes.DATA_ERROR, caPath + 
": Unrecognized file type"); + } + } + + private CertificateTool.CAInfo createNewCA(Terminal terminal) { + terminal.println("A new Certificate Authority will be generated for you"); + + printHeader("CA Generation Options", terminal); + terminal.println("The generated certificate authority will have the following configuration values."); + terminal.println("These values have been selected based on secure defaults."); + terminal.println("You should not need to change these values unless you have specific requirements."); + terminal.println(""); + + X500Principal dn = DEFAULT_CA_NAME; + Period validity = DEFAULT_CA_VALIDITY; + int keySize = DEFAULT_CA_KEY_SIZE; + while (true) { + terminal.println(Terminal.Verbosity.SILENT, "Subject DN: " + dn); + terminal.println(Terminal.Verbosity.SILENT, "Validity: " + toString(validity)); + terminal.println(Terminal.Verbosity.SILENT, "Key Size: " + keySize); + terminal.println(Terminal.Verbosity.SILENT, ""); + if (terminal.promptYesNo("Do you wish to change any of these options?", false) == false) { + break; + } + + printHeader("What should your CA be named?", terminal); + terminal.println("Every client that connects to your Elasticsearch cluster will need to trust"); + terminal.println("this custom Certificate Authority."); + terminal.println("It is helpful to have a meaningful name for this CA"); + terminal.println(""); + dn = tryReadInput(terminal, "CA Name", dn, name -> { + try { + if (name.contains("=")) { + return new X500Principal(name); + } else { + return new X500Principal("CN=" + name); + } + } catch (IllegalArgumentException e) { + terminal.println(Terminal.Verbosity.SILENT, "'" + name + "' is not a valid CA name (" + e.getMessage() + ")"); + return null; + } + }); + + printHeader("How long should your CA be valid?", terminal); + terminal.println("Every certificate has an expiry date. When the expiry date is reached, clients"); + terminal.println("will stop trusting your Certificate Authority and TLS connections will fail."); + terminal.println(""); + terminal.println("We recommend that you set this to a long duration (3 - 5 years) and then perform a"); + terminal.println("manual update a few months before it expires."); + terminal.println("You may enter the validity period in years (e.g. 3Y), months (e.g. 18M), or days (e.g. 
90D)"); + + validity = readPeriodInput(terminal, "CA Validity", validity, 90); + + printHeader("What key size should your CA have?", terminal); + terminal.println("The RSA private key for your Certificate Authority has a fixed 'key size' (in bits)."); + terminal.println("Larger key sizes are generally more secure, but are also slower."); + terminal.println(""); + terminal.println("We recommend that you use one of 2048, 3072 or 4096 bits for your key."); + + keySize = readKeySize(terminal, keySize); + terminal.println(""); + } + + try { + final KeyPair keyPair = CertGenUtils.generateKeyPair(keySize); + final ZonedDateTime notBefore = ZonedDateTime.now(ZoneOffset.UTC); + final ZonedDateTime notAfter = notBefore.plus(validity); + X509Certificate caCert = generateSignedCertificate(dn, null, keyPair, null, null, true, notBefore, notAfter, null); + + printHeader("CA password", terminal); + terminal.println("We recommend that you protect your CA private key with a strong password."); + terminal.println("If your key does not have a password (or the password can be easily guessed)"); + terminal.println("then anyone who gets a copy of the key file will be able to generate new certificates"); + terminal.println("and impersonate your Elasticsearch cluster."); + terminal.println(""); + terminal.println("IT IS IMPORTANT THAT YOU REMEMBER THIS PASSWORD AND KEEP IT SECURE"); + terminal.println(""); + final char[] password = readPassword(terminal, "CA password: ", true); + return new CertificateTool.CAInfo(caCert, keyPair.getPrivate(), true, password.length == 0 ? null : password); + } catch (GeneralSecurityException | CertIOException | OperatorCreationException e) { + throw new IllegalArgumentException("Cannot generate CA key pair", e); + } + } + + /** + * Read input from the terminal as a {@link Period}. + * Package protected for testing purposes. 
+ */ + Period readPeriodInput(Terminal terminal, String prompt, Period defaultValue, int recommendedMinimumDays) { + Period period = tryReadInput(terminal, prompt, defaultValue, input -> { + String periodInput = input.replaceAll("[,\\s]", ""); + if (input.charAt(0) != 'P') { + periodInput = "P" + periodInput; + } + try { + final Period parsed = Period.parse(periodInput); + final long approxDays = 30 * parsed.toTotalMonths() + parsed.getDays(); + if (approxDays < recommendedMinimumDays) { + terminal.println("The period '" + toString(parsed) + "' is less than the recommended period"); + if (terminal.promptYesNo("Are you sure?", false) == false) { + return null; + } + } + return parsed; + } catch (DateTimeParseException e) { + terminal.println("Sorry, I do not understand '" + input + "' (" + e.getMessage() + ")"); + return null; + } + }); + return period; + } + + private Integer readKeySize(Terminal terminal, int keySize) { + return tryReadInput(terminal, "Key Size", keySize, input -> { + try { + final int size = Integer.parseInt(input); + if (size < 1024) { + terminal.println("Keys must be at least 1024 bits"); + return null; + } + if (size > 8192) { + terminal.println("Keys cannot be larger than 8192 bits"); + return null; + } + if (size % 1024 != 0) { + terminal.println("The key size should be a multiple of 1024 bits"); + return null; + } + return size; + } catch (NumberFormatException e) { + terminal.println("The key size must be a positive integer"); + return null; + } + }); + } + + private char[] readPassword(Terminal terminal, String prompt, boolean confirm) { + while (true) { + final char[] password = terminal.readSecret(prompt + " [<ENTER> for none]"); + if (password.length == 0) { + return password; + } + if (CertificateTool.isAscii(password)) { + if (confirm) { + final char[] again = terminal.readSecret("Repeat password to confirm: "); + if (Arrays.equals(password, again) == false) { + terminal.println("Passwords do not match"); + continue; + } + } + return password; + } else { + terminal.println(Terminal.Verbosity.SILENT, "Passwords must be plain ASCII"); + } + } + } + + private CertificateTool.CAInfo readKeystoreCA(Path ksPath, FileType fileType, Terminal terminal) throws UserException { + final String storeType = fileType == FileType.PKCS12 ? 
"PKCS12" : "jks"; + terminal.println("Reading a " + storeType + " keystore requires a password."); + terminal.println("It is possible for the keystore's password to be blank,"); + terminal.println("in which case you can simply press <ENTER> at the prompt"); + final char[] password = terminal.readSecret("Password for " + ksPath.getFileName() + ":"); + try { + final Map keys = CertParsingUtils.readKeyPairsFromKeystore(ksPath, storeType, password, alias -> password); + + if (keys.size() != 1) { + if (keys.isEmpty()) { + terminal.println(Terminal.Verbosity.SILENT, "The keystore at " + ksPath + " does not contain any keys "); + } else { + terminal.println(Terminal.Verbosity.SILENT, "The keystore at " + ksPath + " contains " + keys.size() + " keys,"); + terminal.println(Terminal.Verbosity.SILENT, "but this command requires a keystore with a single key"); + } + terminal.println("Please try again with a keystore that contains exactly 1 private key entry"); + throw new UserException(ExitCodes.DATA_ERROR, "The CA keystore " + ksPath + " contains " + keys.size() + " keys"); + } + final Map.Entry pair = keys.entrySet().iterator().next(); + return new CertificateTool.CAInfo((X509Certificate) pair.getKey(), (PrivateKey) pair.getValue()); + } catch (IOException | GeneralSecurityException e) { + throw new ElasticsearchException("Failed to read keystore " + ksPath, e); + } + } + + private CertificateTool.CAInfo readPemCA(Path certPath, Path keyPath, Terminal terminal) throws UserException { + final X509Certificate cert = readCertificate(certPath, terminal); + final PrivateKey key = readPrivateKey(keyPath, terminal); + return new CertificateTool.CAInfo(cert, key); + } + + private X509Certificate readCertificate(Path path, Terminal terminal) throws UserException { + try { + final X509Certificate[] certificates = CertParsingUtils.readX509Certificates(List.of(path)); + switch (certificates.length) { + case 0: + terminal.errorPrintln("Could not read any certificates from " + path); + throw new UserException(ExitCodes.DATA_ERROR, path + ": No certificates found"); + case 1: + return certificates[0]; + default: + terminal.errorPrintln("Read [" + certificates.length + "] certificates from " + path + " but expected 1"); + throw new UserException(ExitCodes.DATA_ERROR, path + ": Multiple certificates found"); + } + } catch (CertificateException | IOException e) { + throw new ElasticsearchException("Failed to read certificates from " + path, e); + } + } + + private PrivateKey readPrivateKey(Path path, Terminal terminal) { + try { + return PemUtils.readPrivateKey(path, () -> { + terminal.println(""); + terminal.println("The PEM key stored in " + path + " requires a password."); + terminal.println(""); + return terminal.readSecret("Password for " + path.getFileName() + ":"); + }); + } catch (IOException e) { + throw new ElasticsearchException("Failed to read private key from " + path, e); + } + } + + + private boolean askExistingCertificateAuthority(Terminal terminal) { + printHeader("Do you have an existing Certificate Authority (CA) key-pair that you wish to use to sign your certificate?", terminal); + terminal.println("If you have an existing CA certificate and key, then you can use that CA to"); + terminal.println("sign your new http certificate. 
This allows you to use the same CA across"); + terminal.println("multiple Elasticsearch clusters which can make it easier to configure clients,"); + terminal.println("and may be easier for you to manage."); + terminal.println(""); + terminal.println("If you do not have an existing CA, one will be generated for you."); + terminal.println(""); + + return terminal.promptYesNo("Use an existing CA?", false); + } + + private T tryReadInput(Terminal terminal, String prompt, T defaultValue, Function parser) { + final String defaultStr = defaultValue instanceof Period ? toString((Period) defaultValue) : String.valueOf(defaultValue); + while (true) { + final String input = terminal.readText(prompt + " [" + defaultStr + "] "); + if (Strings.isEmpty(input)) { + return defaultValue; + } + T parsed = parser.apply(input); + if (parsed != null) { + return parsed; + } + } + } + + static String toString(Period period) { + if (period == null) { + return "N/A"; + } + if (period.isZero()) { + return "0d"; + } + List parts = new ArrayList<>(3); + if (period.getYears() != 0) { + parts.add(period.getYears() + "y"); + } + if (period.getMonths() != 0) { + parts.add(period.getMonths() + "m"); + } + if (period.getDays() != 0) { + parts.add(period.getDays() + "d"); + } + return Strings.collectionToCommaDelimitedString(parts); + } + + private Path requestPath(String prompt, Terminal terminal, Environment env, boolean requireExisting) { + for (; ; ) { + final String input = terminal.readText(prompt); + final Path path = env.configFile().resolve(input).toAbsolutePath(); + + if (path.getFileName() == null) { + terminal.println(Terminal.Verbosity.SILENT, input + " is not a valid file"); + continue; + } + if (requireExisting == false || Files.isReadable(path)) { + return path; + } + + if (Files.notExists(path)) { + terminal.println(Terminal.Verbosity.SILENT, "The file " + path + " does not exist"); + } else { + terminal.println(Terminal.Verbosity.SILENT, "The file " + path + " cannot be read"); + } + } + } + + static FileType guessFileType(Path path, Terminal terminal) { + // trust the extension for some file-types rather than inspecting the contents + // we don't rely on filename for PEM files because + // (a) users have a tendency to get things mixed up (e.g. naming something "key.crt") + // (b) we need to distinguish between Certs & Keys, so a ".pem" file is ambiguous + final String fileName = path == null ? "" : path.getFileName().toString().toLowerCase(Locale.ROOT); + if (fileName.endsWith(".p12") || fileName.endsWith(".pfx") || fileName.endsWith(".pkcs12")) { + return FileType.PKCS12; + } + if (fileName.endsWith(".jks")) { + return FileType.JKS; + } + // Sniff the file. We could just try loading them, but then we need to catch a variety of exceptions + // and guess what they mean. For example, loading a PKCS#12 needs a password, so we would need to + // distinguish between a "wrong/missing password" exception and a "not a PKCS#12 file" exception. 
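+ // The MAGIC_BYTES_* constants compared below are defined earlier in this class (not shown in this diff).
+ // As background: a JKS keystore starts with the magic bytes 0xFE 0xED (from the 0xFEEDFEED header), while a
+ // PKCS#12 keystore is a DER-encoded ASN.1 SEQUENCE and so typically starts with 0x30 followed by a long-form
+ // length byte such as 0x81 or 0x82 - which is why reading just two leading bytes is enough to tell them apart.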
+ try (InputStream in = Files.newInputStream(path)) { + byte[] leadingBytes = new byte[2]; + final int read = in.read(leadingBytes); + if (read < leadingBytes.length) { + // No supported file type has less than 2 bytes + return FileType.UNRECOGNIZED; + } + if (Arrays.equals(leadingBytes, MAGIC_BYTES1_PKCS12) || Arrays.equals(leadingBytes, MAGIC_BYTES2_PKCS12)) { + return FileType.PKCS12; + } + if (Arrays.equals(leadingBytes, MAGIC_BYTES_JKS)) { + return FileType.JKS; + } + } catch (IOException e) { + terminal.errorPrintln("Failed to read from file " + path); + terminal.errorPrintln(e.toString()); + return FileType.UNRECOGNIZED; + } + // Probably a PEM file, but we need to know what type of object(s) it holds + try (Stream lines = Files.lines(path, StandardCharsets.UTF_8)) { + final List types = lines.filter(s -> s.startsWith("-----BEGIN")).map(s -> { + if (s.contains("BEGIN CERTIFICATE")) { + return FileType.PEM_CERT; + } else if (s.contains("PRIVATE KEY")) { + return FileType.PEM_KEY; + } else { + return null; + } + }).filter(ft -> ft != null).collect(Collectors.toList()); + switch (types.size()) { + case 0: + // Not a PEM + return FileType.UNRECOGNIZED; + case 1: + return types.get(0); + default: + if (types.contains(FileType.PEM_KEY)) { + // A Key and something else. Could be a cert + key pair, but we don't support that + terminal.errorPrintln("Cannot determine a type for the PEM file " + path + " because it contains: [" + + Strings.collectionToCommaDelimitedString(types) + "]"); + } else { + // Multiple certificates = chain + return FileType.PEM_CERT_CHAIN; + } + } + } catch (UncheckedIOException | IOException e) { + terminal.errorPrintln("Cannot determine the file type for " + path); + terminal.errorPrintln(e.toString()); + return FileType.UNRECOGNIZED; + } + return FileType.UNRECOGNIZED; + } + + private void printHeader(String text, Terminal terminal) { + terminal.println(""); + terminal.println(Terminal.Verbosity.SILENT, "## " + text); + terminal.println(""); + } + + /** + * The standard zip output stream cannot be wrapped safely in another stream, because its close method closes the + * zip file, not just the entry. 
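+ * (For example, wrapping the zip stream in a PrintWriter and closing that writer would close the whole archive.)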
+ * This class handles close correctly for a single entry + */ + private class ZipEntryStream extends OutputStream { + + private final ZipOutputStream zip; + + ZipEntryStream(ZipOutputStream zip, String name) throws IOException { + this(zip, new ZipEntry(name)); + } + + ZipEntryStream(ZipOutputStream zip, ZipEntry entry) throws IOException { + this.zip = zip; + assert entry.isDirectory() == false; + zip.putNextEntry(entry); + } + + @Override + public void write(int b) throws IOException { + zip.write(b); + } + + @Override + public void write(byte[] b) throws IOException { + zip.write(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + zip.write(b, off, len); + } + + @Override + public void flush() throws IOException { + zip.flush(); + } + + @Override + public void close() throws IOException { + zip.closeEntry(); + } + } + + // For testing + OptionParser getParser() { + return parser; + } +} diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/ca-readme-p12.txt b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/ca-readme-p12.txt new file mode 100644 index 0000000000000..dcd9c7c189811 --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/ca-readme-p12.txt @@ -0,0 +1,33 @@ +There are two files in this directory: + +1. This README file +2. ${P12} + +## ${P12} + +The "${P12}" file is a PKCS#12 format keystore. +It contains a copy of the certificate and private key for your Certificate Authority. + +You should keep this file secure, and should not provide it to anyone else. + +The sole purpose for this keystore is to generate new certificates if you add additional nodes to your Elasticsearch cluster, or need to +update the server names (hostnames or IP addresses) of your nodes. + +This keystore is not required in order to operate any Elastic product or client. +We recommend that you keep the file somewhere safe, and do not deploy it to your production servers. + +#if PASSWORD +Your keystore is protected by a password. +Your password has not been stored anywhere - it is your responsibility to keep it safe. +#else +Your keystore has a blank password. +It is important that you protect this file - if someone else gains access to your private key they can impersonate your Elasticsearch node. +#endif + + +If you wish to create additional certificates for the nodes in your cluster you can provide this keystore to the "elasticsearch-certutil" +utility as shown in the example below: + + elasticsearch-certutil cert --ca ${P12} --dns "hostname.of.your.node" --pass + +See the elasticsearch-certutil documentation for additional options. diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-csr.txt b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-csr.txt new file mode 100644 index 0000000000000..d6e1fce20275a --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-csr.txt @@ -0,0 +1,56 @@ +There are four files in this directory: + +1. This README file +2. ${CSR} +3. ${KEY} +4. ${YML} + +## ${CSR} + +The "${CSR}" file is a Certificate Signing Request. +You should provide a copy of this file to a Certificate Authority ("CA"), and they will provide you with a signed Certificate. 
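+
+If you have OpenSSL available, you can review the contents of the CSR before sending it, for example:
+
+    openssl req -noout -text -in ${CSR}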
+ +In many large organisations there is a central security team that operates an internal Certificate Authority that can generate your +certificate for you. Alternatively, it may be possible to have your certificate generated by a commercial Certificate Authority. + +In either case, you need to provide the ${CSR} file to the certificate authority, and they will provide you with your signed certificate. +For the purposes of this document, we assume that when they send you your certificate, you will save it as a file named "${CERT}". + +The certificate authority might also provide you with a copy of their signing certificate. If they do, you should keep a copy of that +certificate, as you may need it when configuring clients such as Kibana. + +## ${KEY} + +The "${KEY}" file is your private key. +You should keep this file secure, and should not provide it to anyone else (not even the CA). + +Once you have a copy of your certificate (from the CA), you will configure your Elasticsearch nodes to use the certificate +and this private key. +You will need to copy both of those files to your elasticsearch configuration directory. + +#if PASSWORD +Your private key is protected by a passphrase. +Your password has not been stored anywhere - it is your responsibility to keep it safe. + +When you configure elasticsearch to enable SSL (but not before then), you will need to provide the key's password as a secure +configuration setting in Elasticsearch so that it can decrypt your private key. + +The command for this is: + + elasticsearch-keystore add "xpack.security.http.ssl.secure_key_passphrase" + +#else +Your private key is not password protected. +It is important that you protect this file - if someone else gains access to your private key they can impersonate your Elasticsearch node. +#endif + +## ${YML} + +This is a sample configuration for Elasticsearch to enable SSL on the http interface. +You can use this sample to update the "elasticsearch.yml" configuration file in your config directory. +The location of this directory can vary depending on how you installed Elasticsearch, but based on your system it appears that your config +directory is ${CONF_DIR} + +You will not be able to configure Elasticsearch until the Certificate Authority processes your CSR and provides you with a copy of your +certificate. When you have a copy of the certificate you should copy it and the private key ("${KEY}") to the config directory. +The sample config assumes that the certificate is named "${CERT}". diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-p12.txt b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-p12.txt new file mode 100644 index 0000000000000..06f435116e3a0 --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-readme-p12.txt @@ -0,0 +1,38 @@ +There are three files in this directory: + +1. This README file +2. ${P12} +3. ${YML} + +## ${P12} + +The "${P12}" file is a PKCS#12 format keystore. +It contains a copy of your certificate and the associated private key. +You should keep this file secure, and should not provide it to anyone else. + +You will need to copy this file to your elasticsearch configuration directory. + +#if PASSWORD +Your keystore is protected by a password. +Your password has not been stored anywhere - it is your responsibility to keep it safe. 
+ +When you configure elasticsearch to enable SSL (but not before then), you will need to provide the keystore's password as a secure +configuration setting in Elasticsearch so that it can access your private key. + +The command for this is: + + elasticsearch-keystore add "xpack.security.http.ssl.keystore.secure_password" + +#else +Your keystore has a blank password. +It is important that you protect this file - if someone else gains access to your private key they can impersonate your Elasticsearch node. +#endif + +## ${YML} + +This is a sample configuration for Elasticsearch to enable SSL on the http interface. +You can use this sample to update the "elasticsearch.yml" configuration file in your config directory. +The location of this directory can vary depending on how you installed Elasticsearch, but based on your system it appears that your config +directory is ${CONF_DIR} + +This sample configuration assumes that you have copied your ${P12} file directly into the config directory without renaming it. \ No newline at end of file diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-csr.yml b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-csr.yml new file mode 100644 index 0000000000000..9ce1624430ad8 --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-csr.yml @@ -0,0 +1,32 @@ +# +# SAMPLE ELASTICSEARCH CONFIGURATION FOR ENABLING SSL ON THE HTTP INTERFACE +# +# This is a sample configuration snippet for Elasticsearch that enables and configures SSL for the HTTP (Rest) interface +# +# This was automatically generated at: ${DATE} ${TIME} +# This configuration was intended for Elasticsearch version ${VERSION} +# +# You should review these settings, and then update the main configuration file at +# ${CONF_DIR}/elasticsearch.yml +# + +# This turns on SSL for the HTTP (Rest) interface +xpack.security.http.ssl.enabled: true + +# This configures the certificate to use. +# This certificate will be generated by your Certificate Authority, based on the CSR that you sent to them. +xpack.security.http.ssl.certificate: "${CERT}" + +# This configures the private key for your certificate. +#if PASSWORD +# Because your private key is encrypted, you will also need to add the passphrase to the Elasticsearch keystore +# elasticsearch-keystore add "xpack.security.http.ssl.secure_key_passphrase" +#endif +xpack.security.http.ssl.key: "${KEY}" + +# If your Certificate Authority provides you with a copy of their certificate, you can configure it here. +# This is not strictly necessary, but can make it easier when running other elasticsearch utilities such as the "setup-passwords" tool. 
+# +#xpack.security.http.ssl.certificate_authorities: [ "ca.crt" ] +# + diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-p12.yml b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-p12.yml new file mode 100644 index 0000000000000..7658f56b47ddc --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/es-sample-p12.yml @@ -0,0 +1,22 @@ +# +# SAMPLE ELASTICSEARCH CONFIGURATION FOR ENABLING SSL ON THE HTTP INTERFACE +# +# This is a sample configuration snippet for Elasticsearch that enables and configures SSL for the HTTP (Rest) interface +# +# This was automatically generated at: ${DATE} ${TIME} +# This configuration was intended for Elasticsearch version ${VERSION} +# +# You should review these settings, and then update the main configuration file at +# ${CONF_DIR}/elasticsearch.yml +# + +# This turns on SSL for the HTTP (Rest) interface +xpack.security.http.ssl.enabled: true + +# This configures the keystore to use for SSL on HTTP +#if PASSWORD +# Because your keystore has a password, you will also need to add the password to the Elasticsearch keystore +# elasticsearch-keystore add "xpack.security.http.ssl.keystore.secure_password" +#endif +xpack.security.http.ssl.keystore.path: "${P12}" + diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-readme.txt b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-readme.txt new file mode 100644 index 0000000000000..28d308b10a661 --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-readme.txt @@ -0,0 +1,62 @@ +#if CA_CERT +There are three files in this directory: + +1. This README file +2. ${CA_CERT} +3. ${YML} + +#else +There are two files in this directory: + +1. This README file +2. ${YML} + +#endif +#if CA_CERT +## ${CA_CERT} + +The "${CA_CERT}" file is a PEM format X.509 Certificate for the Elasticsearch Certificate Authority. + +You need to configure Kibana to trust this certificate as an issuing CA for TLS connections to your Elasticsearch cluster. +The "${YML}" file, and the instructions below, explain how to do this. + +#else +Because your Elasticsearch certificates are being generated by an external CA (via a Certificate Signing Request), this directory does not +contain a copy of the CA's issuing certificate (we don't know where you will send your CSRs and who will sign them). + +If you are using a public (commercial) CA then it is likely that Kibana will already be configured to trust this CA and you will not need +to do any special configuration. + +However, if you are using a CA that is specific to your organisation, then you will need to configure Kibana to trust that CA. +When your CA issues your certificate, you should ask them for a copy of their certificate chain in PEM format. + +The "${YML}" file, and the instructions below, explain what to do with this file. + +#endif +## ${YML} + +This is a sample configuration for Kibana to enable SSL for connections to Elasticsearch. +You can use this sample to update the "kibana.yml" configuration file in your Kibana config directory. 
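+
+The key setting in the sample is the list of trusted Certificate Authorities that Kibana should use, for example:
+
+    elasticsearch.ssl.certificateAuthorities: [ "config/${CA_CERT_NAME}" ]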
+ +------------------------------------------------------------------------------------------------- +NOTE: + You also need to update the URLs in your "elasticsearch.hosts" setting to use the "https" URL. + e.g. If your kibana.yml file currently has + + elasticsearch.hosts: [ "http://localhost:9200" ] + + then you should change this to: + + elasticsearch.hosts: [ "https://localhost:9200" ] + +------------------------------------------------------------------------------------------------- + +#if CA_CERT +The sample configuration assumes that you have copied the "${CA_CERT}" file directly into the Kibana config +directory without renaming it. +#else +The sample configuration assumes that you have a file named "${CA_CERT_NAME}" which contains your CA's certificate +chain, and have copied that file into the Kibana config directory. +#endif + + diff --git a/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-sample.yml b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-sample.yml new file mode 100644 index 0000000000000..78a92782bdd1d --- /dev/null +++ b/x-pack/plugin/security/cli/src/main/resources/org/elasticsearch/xpack/security/cli/certutil-http/kibana-sample.yml @@ -0,0 +1,25 @@ +# +# SAMPLE KIBANA CONFIGURATION FOR ENABLING SSL TO ELASTICSEARCH +# +# This is a sample configuration snippet for Kibana that configures SSL for connections to Elasticsearch +# +# This was automatically generated at: ${DATE} ${TIME} +# This configuration was intended for version ${VERSION} +# +# You should review these settings, and then update the main kibana.yml configuration file. +# +#------------------------------------------------------------------------------------------------- +# You also need to update the URLs in your "elasticsearch.hosts" setting to use the "https" URL. +# e.g. 
If your kibana.yml file currently has +# +# elasticsearch.hosts: [ "http://localhost:9200" ] +# +# then you should change this to: +# +# elasticsearch.hosts: [ "https://localhost:9200" ] +# +#------------------------------------------------------------------------------------------------- + +# This configures Kibana to trust a specific Certificate Authority for connections to Elasticsearch +elasticsearch.ssl.certificateAuthorities: [ "config/${CA_CERT_NAME}" ] + diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index 6845edbdc6b38..21a8440a7003c 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -21,6 +21,7 @@ import org.bouncycastle.asn1.x509.GeneralName; import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.openssl.PEMDecryptorProvider; import org.bouncycastle.openssl.PEMEncryptedKeyPair; import org.bouncycastle.openssl.PEMParser; import org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -50,6 +51,7 @@ import org.hamcrest.Matchers; import org.junit.After; import org.junit.BeforeClass; +import org.mockito.Mockito; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; @@ -349,6 +351,16 @@ public void testGeneratingSignedPemCertificates() throws Exception { PEMParser pemParser = new PEMParser(reader); Object parsed = pemParser.readObject(); assertThat(parsed, instanceOf(PEMEncryptedKeyPair.class)); + // Verify we are using AES encryption + final PEMDecryptorProvider pemDecryptorProvider = Mockito.mock(PEMDecryptorProvider.class); + try { + ((PEMEncryptedKeyPair) parsed).decryptKeyPair(pemDecryptorProvider); + } catch (Exception e) { + // Catch error thrown by the empty mock, we are only interested in the argument passed in + } + finally { + Mockito.verify(pemDecryptorProvider).get("AES-128-CBC"); + } char[] zeroChars = new char[caInfo.password.length]; Arrays.fill(zeroChars, (char) 0); assertArrayEquals(zeroChars, caInfo.password); @@ -368,7 +380,13 @@ public void testGeneratingSignedPemCertificates() throws Exception { assertTrue(Files.exists(zipRoot.resolve(filename))); final Path cert = zipRoot.resolve(filename + "/" + filename + ".crt"); assertTrue(Files.exists(cert)); - assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); + Path keyFile = zipRoot.resolve(filename + "/" + filename + ".key"); + assertTrue(Files.exists(keyFile)); + if (keyPassword != null) { + assertTrue(Files.readString(keyFile).contains("DEK-Info: AES-128-CBC")); + } else { + assertFalse(Files.readString(keyFile).contains("DEK-Info:")); + } final Path p12 = zipRoot.resolve(filename + "/" + filename + ".p12"); try (InputStream input = Files.newInputStream(cert)) { X509Certificate certificate = readX509Certificate(input); diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java new file mode 100644 index 0000000000000..8cb4774d098d2 --- /dev/null +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java @@ -0,0 +1,781 
@@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.cli; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import joptsimple.OptionSet; +import org.bouncycastle.asn1.DERIA5String; +import org.bouncycastle.asn1.DEROctetString; +import org.bouncycastle.asn1.DERSequence; +import org.bouncycastle.asn1.pkcs.Attribute; +import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.Extensions; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequest; +import org.bouncycastle.util.io.pem.PemObject; +import org.bouncycastle.util.io.pem.PemReader; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.PemUtils; +import org.elasticsearch.xpack.security.cli.HttpCertificateCommand.FileType; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.junit.BeforeClass; + +import javax.security.auth.x500.X500Principal; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.Signature; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.CertificateParsingException; +import java.security.cert.X509Certificate; +import java.security.interfaces.RSAKey; +import java.time.Instant; +import java.time.Period; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.test.FileMatchers.isDirectory; +import static org.elasticsearch.test.FileMatchers.isRegularFile; +import static org.elasticsearch.test.FileMatchers.pathExists; +import static org.elasticsearch.xpack.security.cli.HttpCertificateCommand.guessFileType; +import static org.hamcrest.Matchers.arrayWithSize; +import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.oneOf; + +public class HttpCertificateCommandTests extends ESTestCase { + private static final String CA_PASSWORD = "ca-password"; + private FileSystem jimfs; + private Path testRoot; + + @Before + public void createTestDir() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + jimfs = Jimfs.newFileSystem(conf); + testRoot = jimfs.getPath(getClass().getSimpleName() + "-" + getTestName()); + IOUtils.rm(testRoot); + Files.createDirectories(testRoot); + } + + @BeforeClass + public static void muteInFips() { + assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); + } + + public void testGenerateSingleCertificateSigningRequest() throws Exception { + final Path outFile = testRoot.resolve("csr.zip").toAbsolutePath(); + + final List hostNames = randomHostNames(); + final List ipAddresses = randomIpAddresses(); + final String certificateName = hostNames.get(0); + + final HttpCertificateCommand command = new PathAwareHttpCertificateCommand(outFile); + + final MockTerminal terminal = new MockTerminal(); + + terminal.addTextInput("y"); // generate CSR + + terminal.addTextInput(randomBoolean() ? "n" : ""); // cert-per-node + + // enter hostnames + hostNames.forEach(terminal::addTextInput); + terminal.addTextInput(""); // end-of-hosts + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + // enter ip names + ipAddresses.forEach(terminal::addTextInput); + terminal.addTextInput(""); // end-of-ips + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + terminal.addTextInput(randomBoolean() ? 
"n" : ""); // don't change advanced settings + + final String password = randomPassword(); + terminal.addSecretInput(password); + terminal.addSecretInput(password); // confirm + + terminal.addTextInput(outFile.toString()); + + final Environment env = newEnvironment(); + final OptionSet options = command.getParser().parse(new String[0]); + command.execute(terminal, options, env); + + Path zipRoot = getZipRoot(outFile); + + assertThat(zipRoot.resolve("elasticsearch"), isDirectory()); + + final Path csrPath = zipRoot.resolve("elasticsearch/http-" + certificateName + ".csr"); + final PKCS10CertificationRequest csr = readPemObject(csrPath, "CERTIFICATE REQUEST", PKCS10CertificationRequest::new); + + final Path keyPath = zipRoot.resolve("elasticsearch/http-" + certificateName + ".key"); + final AtomicBoolean wasEncrypted = new AtomicBoolean(false); + final PrivateKey privateKey = PemUtils.readPrivateKey(keyPath, () -> { + wasEncrypted.set(true); + return password.toCharArray(); + }); + assertTrue("Password should have been required to decrypted key", wasEncrypted.get()); + + final Path esReadmePath = zipRoot.resolve("elasticsearch/README.txt"); + assertThat(esReadmePath, isRegularFile()); + final String esReadme = Files.readString(esReadmePath); + + final Path ymlPath = zipRoot.resolve("elasticsearch/sample-elasticsearch.yml"); + assertThat(ymlPath, isRegularFile()); + final String yml = Files.readString(ymlPath); + + // Verify the CSR was built correctly + verifyCertificationRequest(csr, certificateName, hostNames, ipAddresses); + + // Verify the key + assertMatchingPair(getPublicKey(csr), privateKey); + + final String crtName = keyPath.getFileName().toString().replace(".csr", ".crt"); + + // Verify the README + assertThat(esReadme, containsString(csrPath.getFileName().toString())); + assertThat(esReadme, containsString(crtName)); + assertThat(esReadme, containsString(keyPath.getFileName().toString())); + assertThat(esReadme, containsString(ymlPath.getFileName().toString())); + assertThat(esReadme, not(containsString(password))); + + // Verify the yml + assertThat(yml, not(containsString(csrPath.getFileName().toString()))); + assertThat(yml, containsString(crtName)); + assertThat(yml, containsString(keyPath.getFileName().toString())); + assertThat(yml, not(containsString(password))); + + // Should not be a CA directory in CSR mode + assertThat(zipRoot.resolve("ca"), not(pathExists())); + + // No CA in CSR mode + verifyKibanaDirectory(zipRoot, false, List.of("Certificate Signing Request"), List.of(password, csrPath.getFileName().toString())); + } + + public void testGenerateSingleCertificateWithExistingCA() throws Exception { + final Path outFile = testRoot.resolve("certs.zip").toAbsolutePath(); + + final List hostNames = randomHostNames(); + final List ipAddresses = randomIpAddresses(); + final String certificateName = hostNames.get(0); + + final Path caCertPath = getDataPath("ca.crt"); + assertThat(caCertPath, isRegularFile()); + final Path caKeyPath = getDataPath("ca.key"); + assertThat(caKeyPath, isRegularFile()); + final String caPassword = CA_PASSWORD; + + final int years = randomIntBetween(1, 8); + + final HttpCertificateCommand command = new PathAwareHttpCertificateCommand(outFile); + + final MockTerminal terminal = new MockTerminal(); + + terminal.addTextInput(randomBoolean() ? "n" : ""); // don't generate CSR + terminal.addTextInput("y"); // existing CA + + // randomise between cert+key, key+cert, PKCS12 : the tool is smart enough to handle any of those. 
+ switch (randomFrom(FileType.PEM_CERT, FileType.PEM_KEY, FileType.PKCS12)) { + case PEM_CERT: + terminal.addTextInput(caCertPath.toAbsolutePath().toString()); + terminal.addTextInput(caKeyPath.toAbsolutePath().toString()); + break; + case PEM_KEY: + terminal.addTextInput(caKeyPath.toAbsolutePath().toString()); + terminal.addTextInput(caCertPath.toAbsolutePath().toString()); + break; + case PKCS12: + terminal.addTextInput(getDataPath("ca.p12").toAbsolutePath().toString()); + break; + } + terminal.addSecretInput(caPassword); + + terminal.addTextInput(years + "y"); // validity period + + terminal.addTextInput(randomBoolean() ? "n" : ""); // don't use cert-per-node + + // enter hostnames + hostNames.forEach(terminal::addTextInput); + terminal.addTextInput(""); // end-of-hosts + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + // enter ip names + ipAddresses.forEach(terminal::addTextInput); + terminal.addTextInput(""); // end-of-ips + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + terminal.addTextInput(randomBoolean() ? "n" : ""); // don't change advanced settings + + final String password = randomPassword(); + terminal.addSecretInput(password); + terminal.addSecretInput(password); // confirm + + terminal.addTextInput(outFile.toString()); + + final Environment env = newEnvironment(); + final OptionSet options = command.getParser().parse(new String[0]); + command.execute(terminal, options, env); + + Path zipRoot = getZipRoot(outFile); + + assertThat(zipRoot.resolve("elasticsearch"), isDirectory()); + + final Path p12Path = zipRoot.resolve("elasticsearch/http.p12"); + + final Path readmePath = zipRoot.resolve("elasticsearch/README.txt"); + assertThat(readmePath, isRegularFile()); + final String readme = Files.readString(readmePath); + + final Path ymlPath = zipRoot.resolve("elasticsearch/sample-elasticsearch.yml"); + assertThat(ymlPath, isRegularFile()); + final String yml = Files.readString(ymlPath); + + final Tuple certAndKey = readCertificateAndKey(p12Path, password.toCharArray()); + + // Verify the Cert was built correctly + verifyCertificate(certAndKey.v1(), certificateName, years, hostNames, ipAddresses); + assertThat(getRSAKeySize(certAndKey.v1().getPublicKey()), is(HttpCertificateCommand.DEFAULT_CERT_KEY_SIZE)); + assertThat(getRSAKeySize(certAndKey.v2()), is(HttpCertificateCommand.DEFAULT_CERT_KEY_SIZE)); + + final X509Certificate caCert = readPemCertificate(caCertPath); + verifyChain(certAndKey.v1(), caCert); + + // Verify the README + assertThat(readme, containsString(p12Path.getFileName().toString())); + assertThat(readme, containsString(ymlPath.getFileName().toString())); + assertThat(readme, not(containsString(password))); + assertThat(readme, not(containsString(caPassword))); + + // Verify the yml + assertThat(yml, containsString(p12Path.getFileName().toString())); + assertThat(yml, not(containsString(password))); + assertThat(yml, not(containsString(caPassword))); + + // Should not be a CA directory when using an existing CA. + assertThat(zipRoot.resolve("ca"), not(pathExists())); + + verifyKibanaDirectory(zipRoot, true, List.of("2. 
elasticsearch-ca.pem"), + List.of(password, caPassword, caKeyPath.getFileName().toString())); + } + + public void testGenerateMultipleCertificateWithNewCA() throws Exception { + final Path outFile = testRoot.resolve("certs.zip").toAbsolutePath(); + + final int numberCerts = randomIntBetween(3, 6); + final String[] certNames = new String[numberCerts]; + final String[] hostNames = new String[numberCerts]; + for (int i = 0; i < numberCerts; i++) { + certNames[i] = randomAlphaOfLengthBetween(6, 12); + hostNames[i] = randomAlphaOfLengthBetween(4, 8); + } + + final HttpCertificateCommand command = new PathAwareHttpCertificateCommand(outFile); + + final MockTerminal terminal = new MockTerminal(); + + terminal.addTextInput(randomBoolean() ? "n" : ""); // don't generate CSR + terminal.addTextInput(randomBoolean() ? "n" : ""); // no existing CA + + final String caDN; + final int caYears; + final int caKeySize; + // randomise whether to change CA defaults. + if (randomBoolean()) { + terminal.addTextInput("y"); // Change defaults + caDN = "CN=" + randomAlphaOfLengthBetween(3, 8); + caYears = randomIntBetween(1, 3); + caKeySize = randomFrom(2048, 3072, 4096); + terminal.addTextInput(caDN); + terminal.addTextInput(caYears + "y"); + terminal.addTextInput(Integer.toString(caKeySize)); + terminal.addTextInput("n"); // Don't change values + } else { + terminal.addTextInput(randomBoolean() ? "n" : ""); // Don't change defaults + caDN = HttpCertificateCommand.DEFAULT_CA_NAME.toString(); + caYears = HttpCertificateCommand.DEFAULT_CA_VALIDITY.getYears(); + caKeySize = HttpCertificateCommand.DEFAULT_CA_KEY_SIZE; + } + + final String caPassword = randomPassword(); + terminal.addSecretInput(caPassword); + terminal.addSecretInput(caPassword); // confirm + + final int certYears = randomIntBetween(1, 8); + terminal.addTextInput(certYears + "y"); // node cert validity period + + terminal.addTextInput("y"); // cert-per-node + + for (int i = 0; i < numberCerts; i++) { + if (i != 0) { + terminal.addTextInput(randomBoolean() ? "y" : ""); // another cert + } + + // certificate / node name + terminal.addTextInput(certNames[i]); + + // enter hostname + terminal.addTextInput(hostNames[i]); // end-of-hosts + terminal.addTextInput(""); // end-of-hosts + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + // no ip + terminal.addTextInput(""); // end-of-ip + terminal.addTextInput(randomBoolean() ? "y" : ""); // yes, correct + + terminal.addTextInput(randomBoolean() ? "n" : ""); // don't change advanced settings + } + terminal.addTextInput("n"); // no more certs + + + final String password = randomPassword(); + terminal.addSecretInput(password); + terminal.addSecretInput(password); // confirm + + terminal.addTextInput(outFile.toString()); + + final Environment env = newEnvironment(); + final OptionSet options = command.getParser().parse(new String[0]); + command.execute(terminal, options, env); + + Path zipRoot = getZipRoot(outFile); + + // Should have a CA directory with the generated CA. 
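+ // (Expected layout of the generated zip, per the assertions below: ca/ca.p12 holding the new CA, one
+ // elasticsearch/<name>/ directory per certificate containing http.p12, README.txt and sample-elasticsearch.yml,
+ // and a kibana/ directory that is checked by verifyKibanaDirectory at the end.)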
+ assertThat(zipRoot.resolve("ca"), isDirectory()); + final Path caPath = zipRoot.resolve("ca/ca.p12"); + final Tuple caCertKey = readCertificateAndKey(caPath, caPassword.toCharArray()); + verifyCertificate(caCertKey.v1(), caDN.replaceFirst("CN=", ""), caYears, List.of(), List.of()); + assertThat(getRSAKeySize(caCertKey.v1().getPublicKey()), is(caKeySize)); + assertThat(getRSAKeySize(caCertKey.v2()), is(caKeySize)); + + assertThat(zipRoot.resolve("elasticsearch"), isDirectory()); + + for (int i = 0; i < numberCerts; i++) { + assertThat(zipRoot.resolve("elasticsearch/" + certNames[i]), isDirectory()); + final Path p12Path = zipRoot.resolve("elasticsearch/" + certNames[i] + "/http.p12"); + assertThat(p12Path, isRegularFile()); + + final Path readmePath = zipRoot.resolve("elasticsearch/" + certNames[i] + "/README.txt"); + assertThat(readmePath, isRegularFile()); + final String readme = Files.readString(readmePath); + + final Path ymlPath = zipRoot.resolve("elasticsearch/" + certNames[i] + "/sample-elasticsearch.yml"); + assertThat(ymlPath, isRegularFile()); + final String yml = Files.readString(ymlPath); + + final Tuple certAndKey = readCertificateAndKey(p12Path, password.toCharArray()); + + // Verify the Cert was built correctly + verifyCertificate(certAndKey.v1(), certNames[i], certYears, List.of(hostNames[i]), List.of()); + verifyChain(certAndKey.v1(), caCertKey.v1()); + assertThat(getRSAKeySize(certAndKey.v1().getPublicKey()), is(HttpCertificateCommand.DEFAULT_CERT_KEY_SIZE)); + assertThat(getRSAKeySize(certAndKey.v2()), is(HttpCertificateCommand.DEFAULT_CERT_KEY_SIZE)); + + // Verify the README + assertThat(readme, containsString(p12Path.getFileName().toString())); + assertThat(readme, containsString(ymlPath.getFileName().toString())); + assertThat(readme, not(containsString(password))); + assertThat(readme, not(containsString(caPassword))); + + // Verify the yml + assertThat(yml, containsString(p12Path.getFileName().toString())); + assertThat(yml, not(containsString(password))); + assertThat(yml, not(containsString(caPassword))); + } + + verifyKibanaDirectory(zipRoot, true, List.of("2. elasticsearch-ca.pem"), + List.of(password, caPassword, caPath.getFileName().toString())); + } + + public void testParsingValidityPeriod() throws Exception { + final HttpCertificateCommand command = new HttpCertificateCommand(); + final MockTerminal terminal = new MockTerminal(); + + terminal.addTextInput("2y"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(2))); + + terminal.addTextInput("18m"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(18))); + + terminal.addTextInput("90d"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofDays(90))); + + terminal.addTextInput("1y, 6m"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); + + // Test: Re-prompt on bad input. 
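+ // ("2m & 4d" is rejected because readPeriodInput only strips commas and whitespace, so the '&' survives,
+ // "P2m&4d" fails Period.parse, and the command re-prompts; the second input "2m 4d" then parses cleanly.)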
+ terminal.addTextInput("2m & 4d"); + terminal.addTextInput("2m 4d"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(2).withDays(4))); + + terminal.addTextInput("1y, 6m"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); + + // Test: Accept default value + final Period p = Period.of(randomIntBetween(1, 5), randomIntBetween(0, 11), randomIntBetween(0, 30)); + terminal.addTextInput(""); + assertThat(command.readPeriodInput(terminal, "", p, 1), is(p)); + + final int y = randomIntBetween(1, 5); + final int m = randomIntBetween(1, 11); + final int d = randomIntBetween(1, 30); + terminal.addTextInput(y + "y " + m + "m " + d + "d"); + assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.of(y, m, d))); + + // Test: Minimum Days + final int shortDays = randomIntBetween(1, 20); + + terminal.addTextInput(shortDays + "d"); + terminal.addTextInput("y"); // I'm sure + assertThat(command.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(shortDays))); + + terminal.addTextInput(shortDays + "d"); + terminal.addTextInput("n"); // I'm not sure + terminal.addTextInput("30d"); + assertThat(command.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(30))); + + terminal.addTextInput("2m"); + terminal.addTextInput("n"); // I'm not sure + terminal.addTextInput("2y"); + assertThat(command.readPeriodInput(terminal, "", null, 90), is(Period.ofYears(2))); + } + + public void testValidityPeriodToString() throws Exception { + assertThat(HttpCertificateCommand.toString(Period.ofYears(2)), is("2y")); + assertThat(HttpCertificateCommand.toString(Period.ofMonths(5)), is("5m")); + assertThat(HttpCertificateCommand.toString(Period.ofDays(60)), is("60d")); + assertThat(HttpCertificateCommand.toString(Period.ZERO), is("0d")); + assertThat(HttpCertificateCommand.toString(null), is("N/A")); + + final int y = randomIntBetween(1, 5); + final int m = randomIntBetween(1, 11); + final int d = randomIntBetween(1, 30); + assertThat(HttpCertificateCommand.toString(Period.of(y, m, d)), is(y + "y," + m + "m," + d + "d")); + } + + public void testGuessFileType() throws Exception { + MockTerminal terminal = new MockTerminal(); + + final Path caCert = getDataPath("ca.crt"); + final Path caKey = getDataPath("ca.key"); + assertThat(guessFileType(caCert, terminal), is(FileType.PEM_CERT)); + assertThat(guessFileType(caKey, terminal), is(FileType.PEM_KEY)); + + final Path certChain = testRoot.resolve("ca.pem"); + try (OutputStream out = Files.newOutputStream(certChain)) { + Files.copy(getDataPath("testnode.crt"), out); + Files.copy(caCert, out); + } + assertThat(guessFileType(certChain, terminal), is(FileType.PEM_CERT_CHAIN)); + + final Path tmpP12 = testRoot.resolve("tmp.p12"); + assertThat(guessFileType(tmpP12, terminal), is(FileType.PKCS12)); + final Path tmpJks = testRoot.resolve("tmp.jks"); + assertThat(guessFileType(tmpJks, terminal), is(FileType.JKS)); + + final Path tmpKeystore = testRoot.resolve("tmp.keystore"); + writeDummyKeystore(tmpKeystore, "PKCS12"); + assertThat(guessFileType(tmpKeystore, terminal), is(FileType.PKCS12)); + writeDummyKeystore(tmpKeystore, "jks"); + assertThat(guessFileType(tmpKeystore, terminal), is(FileType.JKS)); + } + + public void testTextFileSubstitutions() throws Exception { + CheckedBiFunction, String, Exception> copy = (source, subs) -> { + try (InputStream in = new ByteArrayInputStream(source.getBytes(StandardCharsets.UTF_8)); + StringWriter out = new StringWriter(); + PrintWriter writer = new 
PrintWriter(out)) { + HttpCertificateCommand.copyWithSubstitutions(in, writer, subs); + return out.toString().replace("\r\n", "\n"); + } + }; + assertThat(copy.apply("abc\n", Map.of()), is("abc\n")); + assertThat(copy.apply("${not_a_var}\n", Map.of()), is("${not_a_var}\n")); + assertThat(copy.apply("${var}\n", Map.of("var", "xyz")), is("xyz\n")); + assertThat(copy.apply("#if not\nbody\n#endif\n", Map.of()), is("")); + assertThat(copy.apply("#if blank\nbody\n#endif\n", Map.of("blank", "")), is("")); + assertThat(copy.apply("#if yes\nbody\n#endif\n", Map.of("yes", "true")), is("body\n")); + assertThat(copy.apply("#if yes\ntrue\n#else\nfalse\n#endif\n", Map.of("yes", "*")), is("true\n")); + assertThat(copy.apply("#if blank\ntrue\n#else\nfalse\n#endif\n", Map.of("blank", "")), is("false\n")); + assertThat(copy.apply("#if var\n--> ${var} <--\n#else\n(${var})\n#endif\n", Map.of("var", "foo")), is("--> foo <--\n")); + } + + private Path getZipRoot(Path outFile) throws IOException, URISyntaxException { + assertThat(outFile, isRegularFile()); + + FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outFile.toUri()), Collections.emptyMap()); + return fileSystem.getPath("/"); + } + + private List randomIpAddresses() throws UnknownHostException { + final int ipCount = randomIntBetween(0, 3); + final List ipAddresses = new ArrayList<>(ipCount); + for (int i = 0; i < ipCount; i++) { + String ip = randomIpAddress(); + ipAddresses.add(ip); + } + return ipAddresses; + } + + private String randomIpAddress() throws UnknownHostException { + return formatIpAddress(randomByteArrayOfLength(4)); + } + + private String formatIpAddress(byte[] addr) throws UnknownHostException { + return NetworkAddress.format(InetAddress.getByAddress(addr)); + } + + private List randomHostNames() { + final int hostCount = randomIntBetween(1, 5); + final List hostNames = new ArrayList<>(hostCount); + for (int i = 0; i < hostCount; i++) { + String host = String.join(".", randomArray(1, 4, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))); + if (i > 0 && randomBoolean()) { + host = "*." + host; + } + hostNames.add(host); + } + return hostNames; + } + + private String randomPassword() { + // We want to assert that this password doesn't end up in any output files, so we need to make sure we + // don't randomly generate a real word. + return randomAlphaOfLength(4) + randomFrom('~', '*', '%', '$', '|') + randomAlphaOfLength(4); + } + + private void verifyCertificationRequest(PKCS10CertificationRequest csr, String certificateName, List hostNames, + List ipAddresses) throws IOException { + // We rebuild the DN from the encoding because BC uses openSSL style toString, but we use LDAP style. 
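+ // (The expected DN treats the first dot-separated label of the certificate name as the CN and the remaining
+ // labels as DC components, e.g. a name like "node01.example.com" maps to "CN=node01, DC=example, DC=com",
+ // which is what the replaceAll below reconstructs.)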
+ assertThat(new X500Principal(csr.getSubject().getEncoded()).toString(), is("CN=" + certificateName.replaceAll("\\.", ", DC="))); + final Attribute[] extensionAttributes = csr.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); + assertThat(extensionAttributes, arrayWithSize(1)); + assertThat(extensionAttributes[0].getAttributeValues(), arrayWithSize(1)); + assertThat(extensionAttributes[0].getAttributeValues()[0], instanceOf(DERSequence.class)); + + // We register 1 extension - the subject alternative names + final Extensions extensions = Extensions.getInstance(extensionAttributes[0].getAttributeValues()[0]); + assertThat(extensions, notNullValue()); + final GeneralNames names = GeneralNames.fromExtensions(extensions, Extension.subjectAlternativeName); + assertThat(names.getNames(), arrayWithSize(hostNames.size() + ipAddresses.size())); + for (GeneralName name : names.getNames()) { + assertThat(name.getTagNo(), oneOf(GeneralName.dNSName, GeneralName.iPAddress)); + if (name.getTagNo() == GeneralName.dNSName) { + final String dns = DERIA5String.getInstance(name.getName()).getString(); + assertThat(dns, in(hostNames)); + } else if (name.getTagNo() == GeneralName.iPAddress) { + final String ip = formatIpAddress(DEROctetString.getInstance(name.getName()).getOctets()); + assertThat(ip, in(ipAddresses)); + } + } + } + + private void verifyCertificate(X509Certificate cert, String certificateName, int years, + List hostNames, List ipAddresses) throws CertificateParsingException { + assertThat(cert.getSubjectX500Principal().toString(), is("CN=" + certificateName.replaceAll("\\.", ", DC="))); + final Collection> san = cert.getSubjectAlternativeNames(); + final int expectedSanEntries = hostNames.size() + ipAddresses.size(); + if (expectedSanEntries > 0) { + assertThat(san, hasSize(expectedSanEntries)); + for (List name : san) { + assertThat(name, hasSize(2)); + assertThat(name.get(0), Matchers.instanceOf(Integer.class)); + assertThat(name.get(1), Matchers.instanceOf(String.class)); + final Integer tag = (Integer) name.get(0); + final String value = (String) name.get(1); + assertThat(tag, oneOf(GeneralName.dNSName, GeneralName.iPAddress)); + if (tag.intValue() == GeneralName.dNSName) { + assertThat(value, in(hostNames)); + } else if (tag.intValue() == GeneralName.iPAddress) { + assertThat(value, in(ipAddresses)); + } + } + } else if (san != null) { + assertThat(san, hasSize(0)); + } + + // We don't know exactly when the certificate was generated, but it should have been in the last 10 minutes + long now = System.currentTimeMillis(); + long nowMinus10Minutes = now - TimeUnit.MINUTES.toMillis(10); + assertThat(cert.getNotBefore().getTime(), Matchers.lessThanOrEqualTo(now)); + assertThat(cert.getNotBefore().getTime(), Matchers.greaterThanOrEqualTo(nowMinus10Minutes)); + + final ZonedDateTime expiry = Instant.ofEpochMilli(cert.getNotBefore().getTime()).atZone(ZoneOffset.UTC).plusYears(years); + assertThat(cert.getNotAfter().getTime(), is(expiry.toInstant().toEpochMilli())); + } + + private void verifyChain(X509Certificate... chain) throws GeneralSecurityException { + for (int i = 1; i < chain.length; i++) { + assertThat(chain[i - 1].getIssuerX500Principal(), is(chain[i].getSubjectX500Principal())); + chain[i - 1].verify(chain[i].getPublicKey()); + } + final X509Certificate root = chain[chain.length - 1]; + assertThat(root.getIssuerX500Principal(), is(root.getSubjectX500Principal())); + } + + /** + * Checks that a public + private key are a matching pair. 
+ */ + private void assertMatchingPair(PublicKey publicKey, PrivateKey privateKey) throws GeneralSecurityException { + final byte[] bytes = randomByteArrayOfLength(128); + final Signature rsa = Signature.getInstance("SHA512withRSA"); + + rsa.initSign(privateKey); + rsa.update(bytes); + final byte[] signature = rsa.sign(); + + rsa.initVerify(publicKey); + rsa.update(bytes); + assertTrue("PublicKey and PrivateKey are not a matching pair", rsa.verify(signature)); + } + + private void verifyKibanaDirectory(Path zipRoot, boolean expectCAFile, Iterable readmeShouldContain, + Iterable shouldNotContain) throws IOException { + assertThat(zipRoot.resolve("kibana"), isDirectory()); + if (expectCAFile) { + assertThat(zipRoot.resolve("kibana/elasticsearch-ca.pem"), isRegularFile()); + } else { + assertThat(zipRoot.resolve("kibana/elasticsearch-ca.pem"), not(pathExists())); + } + + final Path kibanaReadmePath = zipRoot.resolve("kibana/README.txt"); + assertThat(kibanaReadmePath, isRegularFile()); + final String kibanaReadme = Files.readString(kibanaReadmePath); + + final Path kibanaYmlPath = zipRoot.resolve("kibana/sample-kibana.yml"); + assertThat(kibanaYmlPath, isRegularFile()); + final String kibanaYml = Files.readString(kibanaYmlPath); + + assertThat(kibanaReadme, containsString(kibanaYmlPath.getFileName().toString())); + assertThat(kibanaReadme, containsString("elasticsearch.hosts")); + assertThat(kibanaReadme, containsString("https://")); + assertThat(kibanaReadme, containsString("elasticsearch-ca.pem")); + readmeShouldContain.forEach(s -> assertThat(kibanaReadme, containsString(s))); + shouldNotContain.forEach(s -> assertThat(kibanaReadme, not(containsString(s)))); + + assertThat(kibanaYml, containsString("elasticsearch.ssl.certificateAuthorities: [ \"config/elasticsearch-ca.pem\" ]")); + assertThat(kibanaYml, containsString("https://")); + shouldNotContain.forEach(s -> assertThat(kibanaYml, not(containsString(s)))); + } + + private PublicKey getPublicKey(PKCS10CertificationRequest pkcs) throws GeneralSecurityException { + return new JcaPKCS10CertificationRequest(pkcs).getPublicKey(); + } + + private int getRSAKeySize(Key key) { + assertThat(key, instanceOf(RSAKey.class)); + final RSAKey rsa = (RSAKey) key; + return rsa.getModulus().bitLength(); + } + + private Tuple readCertificateAndKey(Path pkcs12, + char[] password) throws IOException, GeneralSecurityException { + + final Map entries = CertParsingUtils.readPkcs12KeyPairs(pkcs12, password, alias -> password); + assertThat(entries.entrySet(), Matchers.hasSize(1)); + + Certificate cert = entries.keySet().iterator().next(); + Key key = entries.get(cert); + + assertThat(cert, instanceOf(X509Certificate.class)); + assertThat(key, instanceOf(PrivateKey.class)); + assertMatchingPair(cert.getPublicKey(), (PrivateKey) key); + return new Tuple<>((X509Certificate) cert, (PrivateKey) key); + } + + private X509Certificate readPemCertificate(Path caCertPath) throws CertificateException, IOException { + final Certificate[] certificates = CertParsingUtils.readCertificates(List.of(caCertPath)); + assertThat(certificates, arrayWithSize(1)); + final Certificate cert = certificates[0]; + assertThat(cert, instanceOf(X509Certificate.class)); + return (X509Certificate) cert; + } + + private T readPemObject(Path path, String expectedType, + CheckedFunction factory) throws IOException { + assertThat(path, isRegularFile()); + final PemReader csrReader = new PemReader(Files.newBufferedReader(path)); + final PemObject csrPem = csrReader.readPemObject(); + 
assertThat(csrPem.getType(), is(expectedType));
+        return factory.apply(csrPem.getContent());
+    }
+
+    private void writeDummyKeystore(Path path, String type) throws GeneralSecurityException, IOException {
+        Files.deleteIfExists(path);
+        KeyStore ks = KeyStore.getInstance(type);
+        ks.load(null);
+        if (randomBoolean()) {
+            final X509Certificate cert = readPemCertificate(getDataPath("ca.crt"));
+            ks.setCertificateEntry(randomAlphaOfLength(4), cert);
+        }
+        try (OutputStream out = Files.newOutputStream(path)) {
+            ks.store(out, randomAlphaOfLength(8).toCharArray());
+        }
+    }
+
+    /**
+     * A special version of {@link HttpCertificateCommand} that can resolve input strings back to JIMFS paths
+     */
+    private class PathAwareHttpCertificateCommand extends HttpCertificateCommand {
+
+        final Map<String, Path> paths;
+
+        PathAwareHttpCertificateCommand(Path... configuredPaths) {
+            paths = Stream.of(configuredPaths).collect(Collectors.toUnmodifiableMap(Path::toString, Function.identity()));
+        }
+
+        @Override
+        protected Path resolvePath(String name) {
+            return Optional.ofNullable(this.paths.get(name)).orElseGet(() -> super.resolvePath(name));
+        }
+    }
+
+}
diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/PemToKeystore.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/PemToKeystore.java
new file mode 100644
index 0000000000000..52974071dcfa8
--- /dev/null
+++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/PemToKeystore.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.security.cli; + +import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.PemUtils; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.util.List; + +@SuppressForbidden(reason = "CLI utility for testing only") +public class PemToKeystore { + + public static void main(String[] args) throws IOException, GeneralSecurityException { + if (args.length != 5) { + System.out.println("Usage: " + PemToKeystore.class.getName() + " "); + return; + } + Path keystorePath = Paths.get(args[0]).toAbsolutePath(); + String keystoreType = args[1]; + Path certPath = Paths.get(args[2]).toAbsolutePath(); + Path keyPath = Paths.get(args[3]).toAbsolutePath(); + char[] password = args[4].toCharArray(); + + final Certificate[] certificates = CertParsingUtils.readCertificates(List.of(certPath)); + if (certificates.length == 0) { + throw new IllegalArgumentException("No certificates found in " + certPath); + } + final PrivateKey key = PemUtils.readPrivateKey(keyPath, () -> password); + + KeyStore keyStore = KeyStore.getInstance(keystoreType); + keyStore.load(null); + keyStore.setKeyEntry("key", key, password, certificates); + try (OutputStream out = Files.newOutputStream(keystorePath)) { + keyStore.store(out, password); + } + } + +} diff --git a/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.crt b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.crt new file mode 100644 index 0000000000000..111cf4d2af582 --- /dev/null +++ b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSTCCAjGgAwIBAgIUe3y1qDBsjh2w16BBfPQjg5bAgjYwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkxMjAxMTEzNTUwWhcNMzMwODA5MTEzNTUwWjA0MTIwMAYD +VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJdB/0UGumX8QsWAnhnadnza +HsE0WMB50j6uHgqNh/QieIw7iQGmhbwG2V+O7263j74+YOUcrjvEuR3el1+cjJIU +SP0Zl9wV2cWdltW3N/GhvU4QVnJS13w146yB3JEQROsD/hdtGP6vBGjzpjIcmKPa +pSOqJEzG113CYX260FQK86o/9kAk07kce4sx8RW+Xda/e2eLF5siIH7/7eju9OiF +RvQC1bABj0UpccuWwJWjIr93v5egTmQFHuX/Tlq44hhCKFa+0xh+LxdiAlbaeUGG +e3sd1I20veMJAOTftGCOx6Psatcw0P2+FGsliQh8MIMwkcBwkxauuUNvWZpAd+UC +AwEAAaNTMFEwHQYDVR0OBBYEFB1nLSbpN2TgSed4DBuhpwvC1CNqMB8GA1UdIwQY +MBaAFB1nLSbpN2TgSed4DBuhpwvC1CNqMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBABPMW7mpIi8VyA2QwwBnUo8a3OdHdospwoHm5GIbxpOHILq/ +LWosNiJL/gFDVldt6nubIz/rkVqHjUf5H9DeMtlKdPCYJVZ1Cu9peEG3qPVhnTf6 +G6qwt7a6/a1/AwWIc+2EaC9t6VgqVN+Dbn4dH6uI0m+ZwfsTAVCCn4tQYf9/QcRw +YHyl/C8nlWP0YwH0NDYug+J+MRewTpU+BYZPswH99HG954ZVylK00ZlQbeD6hj2w +T/P8sHl9U2vkRiGeLDhP2ygI4glXFNU5VJQGqv2GWxo9XTHCkAjGovzU8D1wYdfX +dWXUwN+qtcVdX3Ur/MowjzRumc6uWZjqEm12Vu4= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.key b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.key new file mode 100644 index 0000000000000..08ae669ce9908 --- /dev/null +++ 
b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,A4FFEDABB4598123 + +Dx7CtxcKyx2clKjaj/c66anbFznGXnueMkWk4FLh9nYiSQmiTqzaE/ajSzH1Fdxm +1/xBvbfFsUT6bdiGkxPEdGpgBCLnsebgjbQWv3lB0wtquhQkTfd8HoyShInnsPAb +Hu5DSHkjvjIABl1MUyHiaZVskBEj/vYKsMil2GinrpWUxwOqg0HKyrKlzb/I7Gph +Hc1NuzbRobtmQgi/JLVnOeVEIJFt3ekVQnEYQxM5+ZUsP85M6WoPOa7q8soGGLnO +OFZ20kihb20/5xaA9SUadpYWFLSwZQYN2471MXj1uWz7mEJjei81mIjdTOem486P +uIqNY1BBHzljjTq+r2mO/RKer5PRR+pbI+cNkRyESQZqitOHWWWPwXSo3K3RhhDK +gaSOSMBLv2qoYjoswafIISIMvSbcnYzNa+p1T/U62Q95STMV1ch2Ulv+20xo4nVG +3Mkr6oESB7MOcRm9XwPYZAb60MbaaFRUOagoId0AM7efLYTIpT6GXbnS0K6PPf2z +cP/LKDh3pOgzjRIAN18+nZBY7D3r6fejWsBonMPlzgEX2hBPjmOLIBgpgO3/Kg2q ++PuSE+F53fPu3t5mxsEdPtM9yJTxfughvrNCxvaxfmajmZfHaMpta1Q2H9iEhv99 +L4nG1UtMJa9MMBPlTsJnkunvLcGQ8KfUMBHtlHwTwd5bP7vSs5aNGJKrdlOoKk3v +O5DGbpfw/UIw2t+2dnqwc1epkYvMJbFc7S9hYMYwJZ1BC3zHxRvBJTJ6LbCxulWC +SLUy/TZVsHSmRNftUJA48ioDSkA/inMziLmb/aqmWfvojiNmSJy/GkPJKyv1C9IK +zPqE+7noy32Cf9hztu933YBBNWPPz9Xh8WC4AluQY9Lg2H8NjBjFadL0Re0QzdBF +ZXEXT5otDthKqZpD5aRQGoleQcTYlIeJkODSgH+Ti7LvuiNJToG0iREyQRXpcsdj +iVBP3jYe4nurHRnozQIfIF0BzArSRi0aRi9PHnregS4gkLtbyKx27T61bB47TYXk +oIPm6qV7wWmVAklBz4+s3UXsTfyiqckdNxDDO+IyGEnEjpml/XePAy52hmGoQ9uI +BCAst7JC0VuKcnad9u/2BL3WN+tyhNQ1zA3OcuNLiMT3mgAghadQq2hBiO2y2cT7 +b9OZLYwA4zLEzacIvo/0X1XtjiRANgZoUaMluyF5yVgnk9X9MmixBOT0pENV8GYx +WbN0xDZPPigynnQTapnLgzOzci/MQZzuWfh1wvnkiKL7y8TXGtl6AvMtYX85yrUE +Fakpleb8clKbSX2RQYlS/7+muO68e/m+svKaIS6ZupAlmu5rhlDsZAK6if+AEPpz +C4AGsV7R9aDn+TZ+Zt+cxd7s+L8rexoMthblCprv3PwCSZ75Q52iLZeajfMhcI9A +KWEra9QFT8kvIX2yuYFItuc9NL8s15zqNcaeUMyiw6gL28yBd7aQLbMj/zADOQGg +qsb5QypRsxV/neh63I7PIQfIsFOJhM3+h9xAFK48nQzc39S7b2SMYdKPOfmEOFLi +ln/q63+Bobl5EotOxb9gsQ0nWmKpQqFHsMzYQSwcJg+gGeBXy6RwIw== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.p12 b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.p12 new file mode 100644 index 0000000000000..02114990b417f Binary files /dev/null and b/x-pack/plugin/security/cli/src/test/resources/org/elasticsearch/xpack/security/cli/ca.p12 differ diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 3b24042f47efb..e295dfd696bc3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -232,6 +232,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction; import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction; +import org.elasticsearch.xpack.security.support.ExtensionComponents; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.support.SecurityStatusChangeListener; import org.elasticsearch.xpack.security.transport.SecurityHttpSettings; @@ -391,10 +392,12 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste final AnonymousUser anonymousUser = new AnonymousUser(settings); final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore, anonymousUser, securityIndex.get(), threadPool); + final SecurityExtension.SecurityComponents 
extensionComponents = new ExtensionComponents(env, client, clusterService, + resourceWatcherService, nativeRoleMappingStore); Map realmFactories = new HashMap<>(InternalRealms.getFactories(threadPool, resourceWatcherService, getSslService(), nativeUsersStore, nativeRoleMappingStore, securityIndex.get())); for (SecurityExtension extension : securityExtensions) { - Map newRealms = extension.getRealms(resourceWatcherService); + Map newRealms = extension.getRealms(extensionComponents); for (Map.Entry entry : newRealms.entrySet()) { if (realmFactories.put(entry.getKey(), entry.getValue()) != null) { throw new IllegalArgumentException("Realm type [" + entry.getKey() + "] is already registered"); @@ -412,7 +415,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste final NativePrivilegeStore privilegeStore = new NativePrivilegeStore(settings, client, securityIndex.get()); components.add(privilegeStore); - dlsBitsetCache.set(new DocumentSubsetBitsetCache(settings)); + dlsBitsetCache.set(new DocumentSubsetBitsetCache(settings, threadPool)); final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(settings); final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService, getLicenseState(), xContentRegistry); @@ -420,7 +423,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(); List, ActionListener>> rolesProviders = new ArrayList<>(); for (SecurityExtension extension : securityExtensions) { - rolesProviders.addAll(extension.getRolesProviders(settings, resourceWatcherService)); + rolesProviders.addAll(extension.getRolesProviders(extensionComponents)); } final ApiKeyService apiKeyService = new ApiKeyService(settings, Clock.systemUTC(), client, getLicenseState(), securityIndex.get(), @@ -436,7 +439,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste getLicenseState().addListener(allRolesStore::invalidateAll); getLicenseState().addListener(new SecurityStatusChangeListener(getLicenseState())); - final AuthenticationFailureHandler failureHandler = createAuthenticationFailureHandler(realms); + final AuthenticationFailureHandler failureHandler = createAuthenticationFailureHandler(realms, extensionComponents); authcService.set(new AuthenticationService(settings, realms, auditTrailService, failureHandler, threadPool, anonymousUser, tokenService, apiKeyService)); components.add(authcService.get()); @@ -496,11 +499,12 @@ private AuthorizationEngine getAuthorizationEngine() { return authorizationEngine; } - private AuthenticationFailureHandler createAuthenticationFailureHandler(final Realms realms) { + private AuthenticationFailureHandler createAuthenticationFailureHandler(final Realms realms, + final SecurityExtension.SecurityComponents components) { AuthenticationFailureHandler failureHandler = null; String extensionName = null; for (SecurityExtension extension : securityExtensions) { - AuthenticationFailureHandler extensionFailureHandler = extension.getAuthenticationFailureHandler(); + AuthenticationFailureHandler extensionFailureHandler = extension.getAuthenticationFailureHandler(components); if (extensionFailureHandler != null && failureHandler != null) { throw new IllegalStateException("Extensions [" + extensionName + "] and [" + extension.toString() + "] " + "both set an authentication failure handler"); diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java index 8acc6631920fc..74ce788322293 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.Realms; -import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 42d582997d827..c479722b05098 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -526,7 +526,7 @@ void decodeToken(String token, ActionListener listener) { listener.onResponse(null); } } - } catch (IOException e) { + } catch (Exception e) { // could happen with a token that is not ours if (logger.isDebugEnabled()) { logger.debug("built in token service unable to decode token", e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index 2ca8efd4cd5e5..297cb9f500f3a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -21,9 +21,9 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.ietf.jgss.GSSException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index 2f104c98cbdc6..388c12076c76f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -35,8 +35,8 @@ import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import 
org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper.UserData; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java index 4e05d82db82f7..8f3f05ea30d39 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java @@ -44,7 +44,7 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import java.net.URI; import java.net.URISyntaxException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index ca6e4e09c2d28..525c79379f3d5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -32,9 +32,9 @@ import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index b9508ecd97815..4f8677364dcf7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.opensaml.core.criterion.EntityIdCriterion; import 
org.opensaml.saml.common.xml.SAMLConstants; import org.opensaml.saml.criterion.EntityRoleCriterion; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index fed4e1fb13ee6..1ac41e1c411c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java index f62c8521a69ff..edfad1f443465 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java @@ -20,7 +20,9 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import java.io.IOException; import java.nio.file.Files; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java index 21f6a1e2a8cda..d6aaf838374dd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java @@ -15,9 +15,9 @@ import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; /** * A {@link UserRoleMapper} that composes one or more delegate role-mappers. 
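
The package moves in the hunks around here relocate UserRoleMapper and CachingRealm into x-pack core so that SecurityExtension implementations depend only on core types, and the Security.java changes earlier in this diff hand every extension a single SecurityComponents object instead of individual services. A minimal, hypothetical sketch of an extension written against the new getRealms signature (ExampleExtension and the "custom" realm type are invented names; this mirrors the DummyExtension change in SecurityTests later in this diff and is not part of the patch):

import java.util.Collections;
import java.util.Map;

import org.elasticsearch.xpack.core.security.SecurityExtension;
import org.elasticsearch.xpack.core.security.authc.Realm;

// Hypothetical third-party extension, shown only to illustrate the new callback shape.
public class ExampleExtension implements SecurityExtension {

    @Override
    public Map<String, Realm.Factory> getRealms(SecurityComponents components) {
        // settings(), client(), clusterService(), resourceWatcherService(), threadPool()
        // and roleMapper() are all reachable from the single components argument.
        return Collections.singletonMap("custom", config -> null);
    }
}

The ExtensionComponents class added later in this diff is the concrete SecurityComponents implementation that Security.java constructs and passes to each registered extension.
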
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index f00c48b9d8288..2bd02e6decb1f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -36,8 +36,8 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; -import org.elasticsearch.xpack.security.authc.support.CachingRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 4b0e99d7290fd..b61716462637e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -524,9 +524,12 @@ private static boolean checkChangePasswordAction(Authentication authentication) } assert realmType != null; - // ensure the user was authenticated by a realm that we can change a password for. The native realm is an internal realm and - // right now only one can exist in the realm configuration - if this changes we should update this check - return ReservedRealm.TYPE.equals(realmType) || NativeRealmSettings.TYPE.equals(realmType); + // Ensure that the user is not authenticated with an access token or an API key. + // Also ensure that the user was authenticated by a realm that we can change a password for. 
The native realm is an internal realm + // and right now only one can exist in the realm configuration - if this changes we should update this check + final Authentication.AuthenticationType authType = authentication.getAuthenticationType(); + return (authType.equals(Authentication.AuthenticationType.REALM) + && (ReservedRealm.TYPE.equals(realmType) || NativeRealmSettings.TYPE.equals(realmType))); } static class RBACAuthorizationInfo implements AuthorizationInfo { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 667b53c1c3328..6ff549cbf957a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -41,6 +42,7 @@ import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; @@ -83,6 +85,7 @@ public class CompositeRolesStore { Setting.intSetting("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 10000, Property.NodeScope); private static final Logger logger = LogManager.getLogger(CompositeRolesStore.class); + private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); private final FileRolesStore fileRolesStore; private final NativeRolesStore nativeRolesStore; @@ -154,6 +157,7 @@ public void roles(Set roleNames, ActionListener roleActionListener final long invalidationCounter = numInvalidation.get(); roleDescriptors(roleNames, ActionListener.wrap( rolesRetrievalResult -> { + logDeprecatedRoles(rolesRetrievalResult.roleDescriptors); final boolean missingRoles = rolesRetrievalResult.getMissingRoles().isEmpty() == false; if (missingRoles) { logger.debug(() -> new ParameterizedMessage("Could not find roles with names {}", @@ -179,6 +183,17 @@ public void roles(Set roleNames, ActionListener roleActionListener } } + void logDeprecatedRoles(Set roleDescriptors) { + roleDescriptors.stream() + .filter(rd -> Boolean.TRUE.equals(rd.getMetadata().get(MetadataUtils.DEPRECATED_METADATA_KEY))) + .forEach(rd -> { + String reason = Objects.toString( + rd.getMetadata().get(MetadataUtils.DEPRECATED_REASON_METADATA_KEY), "Please check the documentation"); + deprecationLogger.deprecatedAndMaybeLog("deprecated_role-" + rd.getName(), "The role [" + rd.getName() + + "] is deprecated and will be removed in a future version of Elasticsearch. 
" + reason); + }); + } + public void getRoles(User user, Authentication authentication, ActionListener roleActionListener) { // we need to special case the internal users in this method, if we apply the anonymous roles to every user including these system // user accounts then we run into the chance of a deadlock because then we need to get a role that we may be trying to get as the diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ExtensionComponents.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ExtensionComponents.java new file mode 100644 index 0000000000000..4a074540e6190 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/ExtensionComponents.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.SecurityExtension; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; + +/** + * Immutable implementation of {@link SecurityExtension.SecurityComponents}. + */ +public final class ExtensionComponents implements SecurityExtension.SecurityComponents { + private final Environment environment; + private final Client client; + private final ClusterService clusterService; + private final ResourceWatcherService resourceWatcherService; + private final UserRoleMapper roleMapper; + + public ExtensionComponents(Environment environment, Client client, ClusterService clusterService, + ResourceWatcherService resourceWatcherService, UserRoleMapper roleMapper) { + this.environment = environment; + this.client = client; + this.clusterService = clusterService; + this.resourceWatcherService = resourceWatcherService; + this.roleMapper = roleMapper; + } + + @Override + public Settings settings() { + return environment.settings(); + } + + @Override + public Environment environment() { + return environment; + } + + @Override + public Client client() { + return client; + } + + @Override + public ThreadPool threadPool() { + return client.threadPool(); + } + + @Override + public ResourceWatcherService resourceWatcherService() { + return resourceWatcherService; + } + + @Override + public ClusterService clusterService() { + return clusterService; + } + + @Override + public UserRoleMapper roleMapper() { + return roleMapper; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index d5b513284510f..6dff631adc462 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -89,7 +89,7 @@ public static class DummyExtension implements SecurityExtension { } @Override - public Map getRealms(ResourceWatcherService resourceWatcherService) { + public Map getRealms(SecurityComponents components) { 
return Collections.singletonMap(realmType, config -> null); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index d2ed6500036c4..9a750409e9aeb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -51,7 +51,7 @@ import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index f86f01ec4acc5..e24f38824adc8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -61,7 +61,7 @@ import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; import org.elasticsearch.xpack.security.authc.saml.SamlTestCase; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index c3d6c5ae07e0e..88c9fd69d7639 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; import javax.security.auth.login.LoginException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java index fe8220dad4e6e..aa09db07d20ad 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java 
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java index 8d6869404bbaf..3e4d69a2f7f3f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; import javax.security.auth.login.LoginException; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java index 5e69378bea0fc..61177719679b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java index 5b5d45b1d41fc..f9123c231cd9a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.junit.Before; import org.mockito.Mockito; diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java index 9e1414b438d34..e737e189e9395 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.opensaml.saml.common.xml.SAMLConstants; import org.opensaml.saml.saml2.metadata.EntityDescriptor; import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index 9b86075107876..020b58422f13f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.xpack.core.ssl.TestsSSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java index 51ea82fc0e431..07d75079a2588 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java @@ -7,6 +7,7 @@ import com.unboundid.ldap.sdk.DN; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; import java.util.Locale; @@ -73,4 +74,4 @@ public void testParsingMalformedInput() { private void assertPredicate(Predicate predicate, Object value, boolean expected) { assertThat("Predicate [" + predicate + "] match [" + value + "]", predicate.test(new FieldValue(value)), equalTo(expected)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 6f78435a47c14..3b67fb1954c2e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -33,7 
+33,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 787d4069c1e1e..a7af38a05379a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; -import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.Matchers; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index fd84afea365be..3066ee2732a5a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -103,6 +103,7 @@ public void testSameUserPermission() { final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; final Authentication authentication = mock(Authentication.class); final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.REALM); when(authentication.getUser()).thenReturn(user); when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); when(authenticatedBy.getType()) @@ -126,9 +127,10 @@ public void testSameUserPermissionDoesNotAllowNonMatchingUsername() { final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); when(authentication.getUser()).thenReturn(user); when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); - when(authenticatedBy.getType()) - .thenReturn(changePasswordRequest ? randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : - randomAlphaOfLengthBetween(4, 12)); + final String authenticationType = changePasswordRequest ? 
randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : + randomAlphaOfLengthBetween(4, 12); + when(authenticatedBy.getType()).thenReturn(authenticationType); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.REALM); assertThat(request, instanceOf(UserRequest.class)); assertFalse(engine.checkSameUserPermissions(action, request, authentication)); @@ -181,6 +183,7 @@ public void testSameUserPermissionRunAsChecksAuthenticatedBy() { final Authentication authentication = mock(Authentication.class); final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); final Authentication.RealmRef lookedUpBy = mock(Authentication.RealmRef.class); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.REALM); when(authentication.getUser()).thenReturn(user); when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); when(authentication.getLookedUpBy()).thenReturn(lookedUpBy); @@ -199,6 +202,7 @@ public void testSameUserPermissionDoesNotAllowChangePasswordForOtherRealms() { final String action = ChangePasswordAction.NAME; final Authentication authentication = mock(Authentication.class); final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.REALM); when(authentication.getUser()).thenReturn(user); when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); when(authenticatedBy.getType()).thenReturn(randomFrom(LdapRealmSettings.LDAP_TYPE, FileRealmSettings.TYPE, @@ -210,6 +214,47 @@ public void testSameUserPermissionDoesNotAllowChangePasswordForOtherRealms() { verify(authenticatedBy).getType(); verify(authentication).getAuthenticatedBy(); verify(authentication, times(2)).getUser(); + verify(authentication).getAuthenticationType(); + verifyNoMoreInteractions(authenticatedBy, authentication); + } + + public void testSameUserPermissionDoesNotAllowChangePasswordForApiKey() { + final User user = new User("joe"); + final ChangePasswordRequest request = new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = ChangePasswordAction.NAME; + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.API_KEY); + when(authenticatedBy.getType()).thenReturn(ApiKeyService.API_KEY_REALM_TYPE); + + assertThat(request, instanceOf(UserRequest.class)); + assertFalse(engine.checkSameUserPermissions(action, request, authentication)); + verify(authenticatedBy).getType(); + verify(authentication).getAuthenticatedBy(); + verify(authentication, times(2)).getUser(); + verify(authentication).getAuthenticationType(); + verifyNoMoreInteractions(authenticatedBy, authentication); + } + + public void testSameUserPermissionDoesNotAllowChangePasswordForAccessToken() { + final User user = new User("joe"); + final ChangePasswordRequest request = new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = ChangePasswordAction.NAME; + final Authentication authentication = mock(Authentication.class); + final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); + 
when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.TOKEN); + when(authenticatedBy.getType()).thenReturn(NativeRealmSettings.TYPE); + + assertThat(request, instanceOf(UserRequest.class)); + assertFalse(engine.checkSameUserPermissions(action, request, authentication)); + verify(authenticatedBy).getType(); + verify(authentication).getAuthenticatedBy(); + verify(authentication, times(2)).getUser(); + verify(authentication).getAuthenticationType(); verifyNoMoreInteractions(authenticatedBy, authentication); } @@ -221,6 +266,7 @@ public void testSameUserPermissionDoesNotAllowChangePasswordForLookedUpByOtherRe final Authentication authentication = mock(Authentication.class); final Authentication.RealmRef authenticatedBy = mock(Authentication.RealmRef.class); final Authentication.RealmRef lookedUpBy = mock(Authentication.RealmRef.class); + when(authentication.getAuthenticationType()).thenReturn(Authentication.AuthenticationType.REALM); when(authentication.getUser()).thenReturn(user); when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); when(authentication.getLookedUpBy()).thenReturn(lookedUpBy); @@ -233,6 +279,7 @@ public void testSameUserPermissionDoesNotAllowChangePasswordForLookedUpByOtherRe verify(authentication).getLookedUpBy(); verify(authentication, times(2)).getUser(); verify(lookedUpBy).getType(); + verify(authentication).getAuthenticationType(); verifyNoMoreInteractions(authentication, lookedUpBy, authenticatedBy); } @@ -544,6 +591,117 @@ public void testCheckingIndexPermissionsDefinedOnDifferentPatterns() throws Exce )); } + public void testCheckRestrictedIndexPatternPermission() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 12)); + Authentication authentication = mock(Authentication.class); + when(authentication.getUser()).thenReturn(user); + final String patternPrefix = RestrictedIndicesNames.ASYNC_SEARCH_PREFIX.substring(0, + randomIntBetween(2, RestrictedIndicesNames.ASYNC_SEARCH_PREFIX.length() - 2)); + Role role = Role.builder("role") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.INDEX, false, patternPrefix + "*") + .build(); + RBACAuthorizationInfo authzInfo = new RBACAuthorizationInfo(role, null); + + String prePatternPrefix = patternPrefix.substring(0, randomIntBetween(1, patternPrefix.length() - 1)) + "*"; + HasPrivilegesResponse response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(prePatternPrefix) + .allowRestrictedIndices(randomBoolean()) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(prePatternPrefix) + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).map()).build())); + + String matchesPatternPrefix = RestrictedIndicesNames.ASYNC_SEARCH_PREFIX.substring(0, patternPrefix.length() + 1); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(matchesPatternPrefix + "*") + .allowRestrictedIndices(false) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(true)); + assertThat(response.getIndexPrivileges(), 
Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(matchesPatternPrefix + "*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).map()).build())); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(matchesPatternPrefix + "*") + .allowRestrictedIndices(true) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(matchesPatternPrefix + "*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).map()).build())); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(matchesPatternPrefix) + .allowRestrictedIndices(randomBoolean()) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(true)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(matchesPatternPrefix) + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).map()).build())); + + final String restrictedIndexMatchingWildcard = RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(restrictedIndexMatchingWildcard + "*") + .allowRestrictedIndices(true) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(restrictedIndexMatchingWildcard + "*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).map()).build())); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(restrictedIndexMatchingWildcard + "*") + .allowRestrictedIndices(false) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(restrictedIndexMatchingWildcard + "*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).map()).build())); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(restrictedIndexMatchingWildcard) + .allowRestrictedIndices(randomBoolean()) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(restrictedIndexMatchingWildcard) + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).map()).build())); + + role = Role.builder("role") + .add(FieldPermissions.DEFAULT, null, 
IndexPrivilege.INDEX, true, patternPrefix + "*") + .build(); + authzInfo = new RBACAuthorizationInfo(role, null); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(matchesPatternPrefix + "*") + .allowRestrictedIndices(randomBoolean()) + .privileges("index") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(true)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(matchesPatternPrefix + "*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).map()).build())); + } + public void testCheckExplicitRestrictedIndexPermissions() throws Exception { User user = new User(randomAlphaOfLengthBetween(4, 12)); Authentication authentication = mock(Authentication.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java index 0c20a7c20d09c..66161ee27ec19 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authz; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -19,6 +20,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TestMatchers; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -27,6 +29,7 @@ import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.LinkedHashSet; @@ -296,4 +299,30 @@ public void testParseIgnoresTransientMetadata() throws Exception { assertEquals(true, parsed.getTransientMetadata().get("enabled")); } + public void testParseIndicesPrivilegesSucceedsWhenExceptFieldsIsSubsetOfGrantedFields() throws IOException { + final boolean grantAll = randomBoolean(); + final String grant = grantAll ? "\"*\"" : "\"f1\",\"f2\""; + final String except = grantAll ? "\"_fx\",\"f8\"" : "\"f1\""; + + final String json = "{ \"indices\": [{\"names\": [\"idx1\",\"idx2\"], \"privileges\": [\"p1\", \"p2\"], \"field_security\" : { " + + "\"grant\" : [" + grant + "], \"except\" : [" + except + "] } }] }"; + final RoleDescriptor rd = RoleDescriptor.parse("test", + new BytesArray(json), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertEquals(1, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[]{"idx1", "idx2"}, rd.getIndicesPrivileges()[0].getIndices()); + assertArrayEquals((grantAll) ? new String[]{"*"} : new String[]{"f1", "f2"}, rd.getIndicesPrivileges()[0].getGrantedFields()); + assertArrayEquals((grantAll) ? 
new String[]{"_fx", "f8"} : new String[]{"f1"}, rd.getIndicesPrivileges()[0].getDeniedFields()); + } + + public void testParseIndicesPrivilegesFailsWhenExceptFieldsAreNotSubsetOfGrantedFields() { + final String json = "{ \"indices\": [{\"names\": [\"idx1\",\"idx2\"], \"privileges\": [\"p1\", \"p2\"], \"field_security\" : { " + + "\"grant\" : [\"f1\",\"f2\"], \"except\" : [\"f3\"] } }] }"; + final ElasticsearchParseException epe = expectThrows(ElasticsearchParseException.class, () -> RoleDescriptor.parse("test", + new BytesArray(json), false, XContentType.JSON)); + assertThat(epe, TestMatchers.throwableWithMessage(containsString("must be a subset of the granted fields "))); + assertThat(epe, TestMatchers.throwableWithMessage(containsString("f1"))); + assertThat(epe, TestMatchers.throwableWithMessage(containsString("f2"))); + assertThat(epe, TestMatchers.throwableWithMessage(containsString("f3"))); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index be73972f3a1e3..ee10a29083ad3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -322,6 +322,32 @@ public void testSecurityIndicesPermissions() { assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_MAIN_ALIAS).isGranted(), is(true)); } + public void testAsyncSearchIndicesPermissions() { + final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String asyncSearchIndex = RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2); + final MetaData metaData = new MetaData.Builder() + .put(new IndexMetaData.Builder(asyncSearchIndex) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(), true) + .build(); + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + SortedMap lookup = metaData.getAliasAndIndexLookup(); + + // allow_restricted_indices: false + IndicesPermission.Group group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, false, "*"); + Map authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, + Sets.newHashSet(asyncSearchIndex), lookup, fieldPermissionsCache); + assertThat(authzMap.get(asyncSearchIndex).isGranted(), is(false)); + + // allow_restricted_indices: true + group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, true, "*"); + authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, + Sets.newHashSet(asyncSearchIndex), lookup, fieldPermissionsCache); + assertThat(authzMap.get(asyncSearchIndex).isGranted(), is(true)); + } + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { return new FieldPermissionsDefinition(granted, denied); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 7da88f0231b8c..4b453c77691b2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -28,6 +29,7 @@ import org.elasticsearch.license.TestUtils.UpdatableLicenseState; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest.Empty; import org.elasticsearch.xpack.core.XPackSettings; @@ -52,6 +54,7 @@ import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; @@ -63,10 +66,14 @@ import java.io.IOException; import java.time.Instant; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -83,6 +90,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; @@ -142,21 +150,14 @@ public void testRolesWhenDlsFlsUnlicensed() throws IOException { }, null); FileRolesStore fileRolesStore = mock(FileRolesStore.class); doCallRealMethod().when(fileRolesStore).accept(any(Set.class), any(ActionListener.class)); - ReservedRolesStore reservedRolesStore = mock(ReservedRolesStore.class); - doCallRealMethod().when(reservedRolesStore).accept(any(Set.class), any(ActionListener.class)); - NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); - doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); when(fileRolesStore.roleDescriptors(Collections.singleton("fls"))).thenReturn(Collections.singleton(flsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole)); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); - CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore, - reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), - new 
ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class), documentSubsetBitsetCache, - rds -> effectiveRoleDescriptors.set(rds)); + CompositeRolesStore compositeRolesStore = buildCompositeRolesStore(Settings.EMPTY, fileRolesStore, null, + null, null, licenseState, null, null, rds -> effectiveRoleDescriptors.set(rds)); PlainActionFuture roleFuture = new PlainActionFuture<>(); compositeRolesStore.roles(Collections.singleton("fls"), roleFuture); @@ -219,20 +220,13 @@ public void testRolesWhenDlsFlsLicensed() throws IOException { }, null); FileRolesStore fileRolesStore = mock(FileRolesStore.class); doCallRealMethod().when(fileRolesStore).accept(any(Set.class), any(ActionListener.class)); - ReservedRolesStore reservedRolesStore = mock(ReservedRolesStore.class); - doCallRealMethod().when(reservedRolesStore).accept(any(Set.class), any(ActionListener.class)); - NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); - doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); when(fileRolesStore.roleDescriptors(Collections.singleton("fls"))).thenReturn(Collections.singleton(flsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole)); when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole)); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); - CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore, - reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), - new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class), documentSubsetBitsetCache, - rds -> effectiveRoleDescriptors.set(rds)); + CompositeRolesStore compositeRolesStore = buildCompositeRolesStore(Settings.EMPTY, fileRolesStore, null, + null, null, licenseState, null, null, rds -> effectiveRoleDescriptors.set(rds)); PlainActionFuture roleFuture = new PlainActionFuture<>(); compositeRolesStore.roles(Collections.singleton("fls"), roleFuture); @@ -265,6 +259,7 @@ public void testNegativeLookupsAreCached() { final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + doAnswer((invocationOnMock) -> { ActionListener callback = (ActionListener) invocationOnMock.getArguments()[1]; callback.onResponse(RoleRetrievalResult.success(Collections.emptySet())); @@ -280,12 +275,9 @@ public void testNegativeLookupsAreCached() { }).when(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); - final CompositeRolesStore compositeRolesStore = - new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, - nativePrivilegeStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), - new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, 
mock(ApiKeyService.class), - documentSubsetBitsetCache, rds -> effectiveRoleDescriptors.set(rds)); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore(SECURITY_ENABLED_SETTINGS, + fileRolesStore, nativeRolesStore, reservedRolesStore, nativePrivilegeStore, null, null, null, + rds -> effectiveRoleDescriptors.set(rds)); verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor final String roleName = randomAlphaOfLengthBetween(1, 10); @@ -321,7 +313,7 @@ public void testNegativeLookupsAreCached() { if (getSuperuserRole && numberOfTimesToCall > 0) { // the superuser role was requested so we get the role descriptors again verify(reservedRolesStore, times(2)).accept(anySetOf(String.class), any(ActionListener.class)); - verify(nativePrivilegeStore).getPrivileges(isA(Set.class),isA(Set.class), any(ActionListener.class)); + verify(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); } verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore, nativePrivilegeStore); } @@ -343,7 +335,7 @@ public void testNegativeLookupsCacheDisabled() { .put("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 0) .build(); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings), new XPackLicenseState(settings), cache, mock(ApiKeyService.class), documentSubsetBitsetCache, @@ -381,7 +373,7 @@ public void testNegativeLookupsAreNotCachedWithFailures() { final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), @@ -421,6 +413,7 @@ public void testNegativeLookupsAreNotCachedWithFailures() { verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore); } + public void testCustomRolesProviders() { final FileRolesStore fileRolesStore = mock(FileRolesStore.class); doCallRealMethod().when(fileRolesStore).accept(any(Set.class), any(ActionListener.class)); @@ -467,7 +460,7 @@ public void testCustomRolesProviders() { })); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, inMemoryProvider2), @@ -696,7 +689,7 @@ public void testCustomRolesProviderFailures() throws Exception { (roles, listener) -> 
listener.onFailure(new Exception("fake failure")); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, failingProvider), @@ -744,7 +737,7 @@ public void testCustomRolesProvidersLicensing() { // these licenses don't allow custom role providers xPackLicenseState.update(randomFrom(OperationMode.BASIC, OperationMode.GOLD, OperationMode.STANDARD), true, null); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); CompositeRolesStore compositeRolesStore = new CompositeRolesStore( Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache, @@ -808,7 +801,7 @@ public void testCacheClearOnIndexHealthChange() { doCallRealMethod().when(reservedRolesStore).accept(any(Set.class), any(ActionListener.class)); NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); CompositeRolesStore compositeRolesStore = new CompositeRolesStore( Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(Settings.EMPTY), @@ -862,7 +855,7 @@ public void testCacheClearOnIndexOutOfDateChange() { doCallRealMethod().when(reservedRolesStore).accept(any(Set.class), any(ActionListener.class)); NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), @@ -894,12 +887,9 @@ public void testDefaultRoleUserWithoutRoles() { }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); - final CompositeRolesStore compositeRolesStore = - new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, - mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), - new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), 
documentSubsetBitsetCache, - rds -> {}); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, + nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class), null, mock(ApiKeyService.class), + null, null); verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor PlainActionFuture rolesFuture = new PlainActionFuture<>(); @@ -935,11 +925,8 @@ public void testAnonymousUserEnabledRoleAdded() { }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); - final CompositeRolesStore compositeRolesStore = - new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, - mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings), - new XPackLicenseState(settings), cache, mock(ApiKeyService.class), documentSubsetBitsetCache, rds -> {}); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore(settings, fileRolesStore, nativeRolesStore, + reservedRolesStore, mock(NativePrivilegeStore.class), null, mock(ApiKeyService.class), null, null); verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor PlainActionFuture rolesFuture = new PlainActionFuture<>(); @@ -963,7 +950,7 @@ public void testDoesNotUseRolesStoreForXPackUser() { }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, @@ -994,7 +981,7 @@ public void testGetRolesForSystemUserThrowsException() { }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, @@ -1030,7 +1017,7 @@ public void testApiKeyAuthUsesApiKeyService() throws IOException { return Void.TYPE; }).when(nativePrivStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class)); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, @@ -1076,7 +1063,7 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws IOException { return Void.TYPE; 
}).when(nativePrivStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class)); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, @@ -1117,13 +1104,11 @@ public void testUsageStats() { }).when(nativeRolesStore).usageStats(any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); - final DocumentSubsetBitsetCache documentSubsetBitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); + final DocumentSubsetBitsetCache documentSubsetBitsetCache = buildBitsetCache(); - final CompositeRolesStore compositeRolesStore = - new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, - mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), - new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), documentSubsetBitsetCache, rds -> { - }); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore( + SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, null, null, mock(ApiKeyService.class), + documentSubsetBitsetCache, null); PlainActionFuture> usageStatsListener = new PlainActionFuture<>(); compositeRolesStore.usageStats(usageStatsListener); @@ -1133,6 +1118,111 @@ public void testUsageStats() { assertThat(usageStats.get("dls"), is(Map.of("bit_set_cache", documentSubsetBitsetCache.usageStats()))); } + public void testLoggingOfDeprecatedRoles() { + List descriptors = new ArrayList<>(); + Function, RoleDescriptor> newRole = metadata -> new RoleDescriptor( + randomAlphaOfLengthBetween(4, 9), generateRandomStringArray(5, 5, false, true), + null, null, null, null, metadata, null); + + RoleDescriptor deprecated1 = newRole.apply(MetadataUtils.getDeprecatedReservedMetadata("some reason")); + RoleDescriptor deprecated2 = newRole.apply(MetadataUtils.getDeprecatedReservedMetadata("a different reason")); + + // Can't use getDeprecatedReservedMetadata because `Map.of` doesn't accept null values, + // so we clone metadata with a real value and then remove that key + final Map nullReasonMetadata = new HashMap<>(deprecated2.getMetadata()); + nullReasonMetadata.remove(MetadataUtils.DEPRECATED_REASON_METADATA_KEY); + assertThat(nullReasonMetadata.keySet(), hasSize(deprecated2.getMetadata().size() -1)); + RoleDescriptor deprecated3 = newRole.apply(nullReasonMetadata); + + descriptors.add(deprecated1); + descriptors.add(deprecated2); + descriptors.add(deprecated3); + + for (int i = randomIntBetween(2, 10); i > 0; i--) { + // the non-deprecated metadata is randomly one of: + // {}, {_deprecated:null}, {_deprecated:false}, + // {_reserved:true}, {_reserved:true,_deprecated:null}, {_reserved:true,_deprecated:false} + Map metadata = randomBoolean() ? Map.of() : MetadataUtils.DEFAULT_RESERVED_METADATA; + if (randomBoolean()) { + metadata = new HashMap<>(metadata); + metadata.put(MetadataUtils.DEPRECATED_METADATA_KEY, randomBoolean() ? 
null : false); + } + descriptors.add(newRole.apply(metadata)); + } + Collections.shuffle(descriptors, random()); + + final CompositeRolesStore compositeRolesStore = + buildCompositeRolesStore(SECURITY_ENABLED_SETTINGS, null, null, null, null, null, null, null, null); + + // Use a LHS so that the random-shufle-order of the list is preserved + compositeRolesStore.logDeprecatedRoles(new LinkedHashSet<>(descriptors)); + + assertWarnings( + "The role [" + deprecated1.getName() + "] is deprecated and will be removed in a future version of Elasticsearch." + + " some reason", + "The role [" + deprecated2.getName() + "] is deprecated and will be removed in a future version of Elasticsearch." + + " a different reason", + "The role [" + deprecated3.getName() + "] is deprecated and will be removed in a future version of Elasticsearch." + + " Please check the documentation" + ); + } + + private CompositeRolesStore buildCompositeRolesStore(Settings settings, + @Nullable FileRolesStore fileRolesStore, + @Nullable NativeRolesStore nativeRolesStore, + @Nullable ReservedRolesStore reservedRolesStore, + @Nullable NativePrivilegeStore privilegeStore, + @Nullable XPackLicenseState licenseState, + @Nullable ApiKeyService apiKeyService, + @Nullable DocumentSubsetBitsetCache documentSubsetBitsetCache, + @Nullable Consumer> roleConsumer) { + if (fileRolesStore == null) { + fileRolesStore = mock(FileRolesStore.class); + doCallRealMethod().when(fileRolesStore).accept(any(Set.class), any(ActionListener.class)); + when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + } + if (nativeRolesStore == null) { + nativeRolesStore = mock(NativeRolesStore.class); + doCallRealMethod().when(nativeRolesStore).accept(any(Set.class), any(ActionListener.class)); + doAnswer((invocationOnMock) -> { + ActionListener callback = (ActionListener) invocationOnMock.getArguments()[1]; + callback.onResponse(RoleRetrievalResult.failure(new RuntimeException("intentionally failed!"))); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); + } + if (reservedRolesStore == null) { + reservedRolesStore = mock(ReservedRolesStore.class); + doCallRealMethod().when(reservedRolesStore).accept(any(Set.class), any(ActionListener.class)); + } + if (privilegeStore == null) { + privilegeStore = mock(NativePrivilegeStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = null; + callback = (ActionListener>) invocationOnMock.getArguments()[2]; + callback.onResponse(Collections.emptyList()); + return null; + }).when(privilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); + } + if (licenseState == null) { + licenseState = new XPackLicenseState(settings); + } + if (apiKeyService == null) { + apiKeyService = mock(ApiKeyService.class); + } + if (documentSubsetBitsetCache == null) { + documentSubsetBitsetCache = buildBitsetCache(); + } + if (roleConsumer == null) { + roleConsumer = rds -> { }; + } + return new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, privilegeStore, + Collections.emptyList(), new ThreadContext(settings), licenseState, cache, apiKeyService, documentSubsetBitsetCache, + roleConsumer); + } + + private DocumentSubsetBitsetCache buildBitsetCache() { + return new DocumentSubsetBitsetCache(Settings.EMPTY, mock(ThreadPool.class)); + } private static class InMemoryRolesProvider implements BiConsumer, ActionListener> { private final Function, RoleRetrievalResult> 
roleDescriptorsFunc; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java index c8b20e1b4604c..6c7fb2abdabf6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java @@ -34,6 +34,7 @@ public void testXPackUserCannotAccessRestrictedIndices() { for (String index : RestrictedIndicesNames.RESTRICTED_NAMES) { assertThat(predicate.test(index), Matchers.is(false)); } + assertThat(predicate.test(RestrictedIndicesNames.ASYNC_SEARCH_PREFIX + randomAlphaOfLengthBetween(0, 2)), Matchers.is(false)); } public void testXPackUserCanReadAuditTrail() { diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index 7340a1ab9332e..abd2026d437af 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -41,6 +41,7 @@ import java.util.Map; import java.util.TreeMap; import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Arrays.asList; import static java.util.Collections.singletonMap; @@ -650,7 +651,8 @@ public void assertLogs() throws Exception { * SQL drops them from the interface. So we might have access to them, but we * don't show them. */ - indices.removeAll(RestrictedIndicesNames.RESTRICTED_NAMES); + indices = indices.stream().filter( + idx -> false == RestrictedIndicesNames.isRestricted(idx)).collect(Collectors.toList()); } } // Use a sorted list for indices for consistent error reporting diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 3215c6b35efdf..d4cb83485d1e1 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -350,7 +350,7 @@ public void testUseColumnarForTranslateRequest() throws IOException { expectBadRequest(() -> { client().performRequest(request); return Collections.emptyMap(); - }, containsString("unknown field [columnar], parser not found")); + }, containsString("unknown field [columnar]")); } public static void expectBadRequest(CheckedSupplier, Exception> code, Matcher errorMessageMatcher) { diff --git a/x-pack/plugin/sql/qa/src/main/resources/conditionals.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/conditionals.csv-spec index 9f424da2710e3..e6453ad1420e1 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/conditionals.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/conditionals.csv-spec @@ -441,7 +441,7 @@ SELECT ISNULL(birth_date, INTERVAL '23:45' HOUR TO MINUTES + {d '2019-09-17'}) A c:ts | salary:i | birth_date:ts | hire_date:ts ------------------------+-----------------+------------------------+------------------------ 1956-12-13T00:00:00.000Z|74999 |1956-12-13T00:00:00.000Z|1985-11-20T00:00:00.000Z -2019-09-17T00:00:00.000Z|74970 |null |1989-09-02T00:00:00.000Z 
+2019-09-17T23:45:00.000Z|74970 |null |1989-09-02T00:00:00.000Z 1957-05-23T00:00:00.000Z|74572 |1957-05-23T00:00:00.000Z|1989-02-10T00:00:00.000Z 1962-07-10T00:00:00.000Z|73851 |1962-07-10T00:00:00.000Z|1989-07-07T00:00:00.000Z 1953-01-23T00:00:00.000Z|73717 |1953-01-23T00:00:00.000Z|1999-04-30T00:00:00.000Z diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java index c437b704035d0..31377efc2823d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.Nullability; import org.elasticsearch.xpack.ql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.gen.processor.Processor; @@ -64,10 +65,7 @@ public Object fold() { @Override public Nullability nullable() { - if (from().isNull()) { - return Nullability.TRUE; - } - return Nullability.UNKNOWN; + return Expressions.isNull(field()) ? Nullability.TRUE : Nullability.UNKNOWN; } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java index 7e1e13b60e9f0..bc13b8ef36f21 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java @@ -8,7 +8,9 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.ql.type.DataTypeConversion; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.io.IOException; @@ -21,17 +23,38 @@ public class MathProcessor implements Processor { public enum MathOperation { ABS((Object l) -> { - if (l instanceof Float) { - return Double.valueOf(Math.abs(((Float) l).floatValue())); - } if (l instanceof Double) { return Math.abs(((Double) l).doubleValue()); } + if (l instanceof Float) { + return Math.abs(((Float) l).floatValue()); + } + + // fallback to integer long lo = ((Number) l).longValue(); - //handles the corner-case of Long.MIN_VALUE - return lo >= 0 ? lo : lo == Long.MIN_VALUE ? Double.valueOf(Long.MAX_VALUE) : -lo; - }), + if (lo == Long.MIN_VALUE) { + throw new QlIllegalArgumentException("[" + lo + "] cannot be negated since the result is outside the range"); + } + + lo = lo < 0 ? 
-lo : lo; + + if (l instanceof Integer) { + if ((int) lo == Integer.MIN_VALUE) { + throw new QlIllegalArgumentException("[" + lo + "] cannot be negated since the result is outside the range"); + } + return DataTypeConversion.safeToInt(lo); + } + + if (l instanceof Short) { + return DataTypeConversion.safeToShort(lo); + } + if (l instanceof Byte) { + return DataTypeConversion.safeToByte(lo); + } + + return lo; + }), ACOS(Math::acos), ASIN(Math::asin), ATAN(Math::atan), @@ -52,15 +75,37 @@ public enum MathOperation { RANDOM((Object l) -> l != null ? new Random(((Number) l).longValue()).nextDouble() : Randomness.get().nextDouble(), true), - SIGN((DoubleFunction) Math::signum), + SIGN((Object l) -> { + if (l instanceof Double) { + return Math.signum((Double) l); + } + if (l instanceof Float) { + return Math.signum((Float) l); + } + + long lo = Long.signum(((Number) l).longValue()); + + if (l instanceof Integer) { + return DataTypeConversion.safeToInt(lo); + } + if (l instanceof Short) { + return DataTypeConversion.safeToShort(lo); + } + if (l instanceof Byte) { + return DataTypeConversion.safeToByte(lo); + } + + //fallback to generic double + return lo; + }), SIN(Math::sin), SINH(Math::sinh), SQRT(Math::sqrt), TAN(Math::tan); - private final Function apply; + private final Function apply; - MathOperation(Function apply) { + MathOperation(Function apply) { this(apply, false); } @@ -69,7 +114,7 @@ public enum MathOperation { * If true, nulls are passed through, otherwise the function is short-circuited * and null returned. */ - MathOperation(Function apply, boolean nullAware) { + MathOperation(Function apply, boolean nullAware) { if (nullAware) { this.apply = apply; } else { @@ -85,7 +130,7 @@ public enum MathOperation { this.apply = l -> supplier.get(); } - public final Double apply(Object l) { + public final Number apply(Object l) { return apply.apply(l); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 6778cbfcc8d2c..2eea2da544037 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -130,19 +130,19 @@ public static Number truncate(Number v, Number s) { return BinaryOptionalMathOperation.TRUNCATE.apply(v, s); } - public static Double abs(Number value) { + public static Number abs(Number value) { return MathOperation.ABS.apply(value); } - public static Double acos(Number value) { + public static Number acos(Number value) { return MathOperation.ACOS.apply(value); } - public static Double asin(Number value) { + public static Number asin(Number value) { return MathOperation.ASIN.apply(value); } - public static Double atan(Number value) { + public static Number atan(Number value) { return MathOperation.ATAN.apply(value); } @@ -150,55 +150,55 @@ public static Number atan2(Number left, Number right) { return BinaryMathOperation.ATAN2.apply(left, right); } - public static Double cbrt(Number value) { + public static Number cbrt(Number value) { return MathOperation.CBRT.apply(value); } - public static Double ceil(Number value) { + public static Number ceil(Number value) { return MathOperation.CEIL.apply(value); } - public static Double cos(Number value) { + public 
static Number cos(Number value) { return MathOperation.COS.apply(value); } - public static Double cosh(Number value) { + public static Number cosh(Number value) { return MathOperation.COSH.apply(value); } - public static Double cot(Number value) { + public static Number cot(Number value) { return MathOperation.COT.apply(value); } - public static Double degrees(Number value) { + public static Number degrees(Number value) { return MathOperation.DEGREES.apply(value); } - public static Double e(Number value) { + public static Number e(Number value) { return MathOperation.E.apply(value); } - public static Double exp(Number value) { + public static Number exp(Number value) { return MathOperation.EXP.apply(value); } - public static Double expm1(Number value) { + public static Number expm1(Number value) { return MathOperation.EXPM1.apply(value); } - public static Double floor(Number value) { + public static Number floor(Number value) { return MathOperation.FLOOR.apply(value); } - public static Double log(Number value) { + public static Number log(Number value) { return MathOperation.LOG.apply(value); } - public static Double log10(Number value) { + public static Number log10(Number value) { return MathOperation.LOG10.apply(value); } - public static Double pi(Number value) { + public static Number pi(Number value) { return MathOperation.PI.apply(value); } @@ -206,31 +206,31 @@ public static Number power(Number left, Number right) { return BinaryMathOperation.POWER.apply(left, right); } - public static Double radians(Number value) { + public static Number radians(Number value) { return MathOperation.RADIANS.apply(value); } - public static Double random(Number value) { + public static Number random(Number value) { return MathOperation.RANDOM.apply(value); } - public static Double sign(Number value) { + public static Number sign(Number value) { return MathOperation.SIGN.apply(value); } - public static Double sin(Number value) { + public static Number sin(Number value) { return MathOperation.SIN.apply(value); } - public static Double sinh(Number value) { + public static Number sinh(Number value) { return MathOperation.SINH.apply(value); } - public static Double sqrt(Number value) { + public static Number sqrt(Number value) { return MathOperation.SQRT.apply(value); } - public static Double tan(Number value) { + public static Number tan(Number value) { return MathOperation.TAN.apply(value); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 5c947ebce5829..06e90a755ecfe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -84,6 +84,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -704,7 +705,7 @@ static class ConstantFolding extends OptimizerExpressionRule { @Override protected Expression rule(Expression e) { - return e.foldable() && (e instanceof Literal == false) ? Literal.of(e) : e; + return e.foldable() ? 
Literal.of(e) : e; } } @@ -807,14 +808,14 @@ private Expression simplifyAndOr(BinaryPredicate bc) { } if (FALSE.equals(l) || FALSE.equals(r)) { - return FALSE; + return new Literal(bc.source(), Boolean.FALSE, DataType.BOOLEAN); } if (l.semanticEquals(r)) { return l; } // - // common factor extraction -> (a || b) && (a || c) => a && (b || c) + // common factor extraction -> (a || b) && (a || c) => a || (b && c) // List leftSplit = splitOr(l); List rightSplit = splitOr(r); @@ -837,7 +838,7 @@ private Expression simplifyAndOr(BinaryPredicate bc) { if (bc instanceof Or) { if (TRUE.equals(l) || TRUE.equals(r)) { - return TRUE; + return new Literal(bc.source(), Boolean.TRUE, DataType.BOOLEAN); } if (FALSE.equals(l)) { @@ -852,7 +853,7 @@ private Expression simplifyAndOr(BinaryPredicate bc) { } // - // common factor extraction -> (a && b) || (a && c) => a || (b & c) + // common factor extraction -> (a && b) || (a && c) => a && (b || c) // List leftSplit = splitAnd(l); List rightSplit = splitAnd(r); @@ -882,10 +883,10 @@ private Expression simplifyNot(Not n) { Expression c = n.field(); if (TRUE.semanticEquals(c)) { - return FALSE; + return new Literal(n.source(), Boolean.FALSE, DataType.BOOLEAN); } if (FALSE.semanticEquals(c)) { - return TRUE; + return new Literal(n.source(), Boolean.TRUE, DataType.BOOLEAN); } if (c instanceof Negatable) { @@ -918,12 +919,12 @@ private Expression simplify(BinaryComparison bc) { // true for equality if (bc instanceof Equals || bc instanceof GreaterThanOrEqual || bc instanceof LessThanOrEqual) { if (l.nullable() == Nullability.FALSE && r.nullable() == Nullability.FALSE && l.semanticEquals(r)) { - return TRUE; + return new Literal(bc.source(), Boolean.TRUE, DataType.BOOLEAN); } } if (bc instanceof NullEquals) { if (l.semanticEquals(r)) { - return TRUE; + return new Literal(bc.source(), Boolean.TRUE, DataType.BOOLEAN); } if (Expressions.isNull(r)) { return new IsNull(bc.source(), l); @@ -933,7 +934,7 @@ private Expression simplify(BinaryComparison bc) { // false for equality if (bc instanceof NotEquals || bc instanceof GreaterThan || bc instanceof LessThan) { if (l.nullable() == Nullability.FALSE && r.nullable() == Nullability.FALSE && l.semanticEquals(r)) { - return FALSE; + return new Literal(bc.source(), Boolean.FALSE, DataType.BOOLEAN); } } @@ -958,9 +959,11 @@ private Expression literalToTheRight(BinaryOperator be) { } /** - * Propagate Equals to eliminate conjuncted Ranges. - * When encountering a different Equals or non-containing {@link Range}, the conjunction becomes false. - * When encountering a containing {@link Range}, the range gets eliminated by the equality. + * Propagate Equals to eliminate conjuncted Ranges or BinaryComparisons. + * When encountering a different Equals, non-containing {@link Range} or {@link BinaryComparison}, the conjunction becomes false. + * When encountering a containing {@link Range}, {@link BinaryComparison} or {@link NotEquals}, these get eliminated by the equality. + * + * Since this rule can eliminate Ranges and BinaryComparisons, it should be applied before {@link CombineBinaryComparisons}. 
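[Editor's illustration, not part of the patch: a minimal sketch of what the extended PropagateEquals rule folds, written in the same predicate-rewrite notation as the inline comments in the hunk below; assume the predicates appear in a SQL WHERE clause over a field a.]
a = 2 AND a < 5  ->  a = 2      (redundant inequality dropped)
a = 2 AND a > 3  ->  FALSE      (equality contradicts the inequality)
a = 2 OR a > 2   ->  a >= 2     (strict bound widened to absorb the equality)
a = 2 OR a != 2  ->  TRUE       (complementary predicates)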
* * This rule doesn't perform any promotion of {@link BinaryComparison}s, that is handled by * {@link CombineBinaryComparisons} on purpose as the resulting Range might be foldable @@ -976,6 +979,8 @@ static class PropagateEquals extends OptimizerExpressionRule { protected Expression rule(Expression e) { if (e instanceof And) { return propagate((And) e); + } else if (e instanceof Or) { + return propagate((Or) e); } return e; } @@ -983,7 +988,11 @@ protected Expression rule(Expression e) { // combine conjunction private Expression propagate(And and) { List ranges = new ArrayList<>(); + // Only equalities, not-equalities and inequalities with a foldable .right are extracted separately; + // the others go into the general 'exps'. List equals = new ArrayList<>(); + List notEquals = new ArrayList<>(); + List inequalities = new ArrayList<>(); List exps = new ArrayList<>(); boolean changed = false; @@ -996,35 +1005,42 @@ private Expression propagate(And and) { // equals on different values evaluate to FALSE if (otherEq.right().foldable()) { for (BinaryComparison eq : equals) { - // cannot evaluate equals so skip it - if (!eq.right().foldable()) { - continue; - } if (otherEq.left().semanticEquals(eq.left())) { - if (eq.right().foldable() && otherEq.right().foldable()) { Integer comp = BinaryComparison.compare(eq.right().fold(), otherEq.right().fold()); if (comp != null) { // var cannot be equal to two different values at the same time if (comp != 0) { - return FALSE; + return new Literal(and.source(), Boolean.FALSE, DataType.BOOLEAN); } } } } + equals.add(otherEq); + } else { + exps.add(otherEq); } + } else if (ex instanceof GreaterThan || ex instanceof GreaterThanOrEqual || + ex instanceof LessThan || ex instanceof LessThanOrEqual) { + BinaryComparison bc = (BinaryComparison) ex; + if (bc.right().foldable()) { + inequalities.add(bc); + } else { + exps.add(ex); } - equals.add(otherEq); + } else if (ex instanceof NotEquals) { + NotEquals otherNotEq = (NotEquals) ex; + if (otherNotEq.right().foldable()) { + notEquals.add(otherNotEq); } else { exps.add(ex); } + } else { + exps.add(ex); + } } // check for (BinaryComparison eq : equals) { - // cannot evaluate equals so skip it - if (!eq.right().foldable()) { - continue; - } Object eqValue = eq.right().fold(); for (int i = 0; i < ranges.size(); i++) { @@ -1039,8 +1055,8 @@ private Expression propagate(And and) { compare > 0 || // eq matches the boundary but should not be included (compare == 0 && !range.includeLower())) - ) { - return FALSE; + ) { + return new Literal(and.source(), Boolean.FALSE, DataType.BOOLEAN); } } if (range.upper().foldable()) { @@ -1050,8 +1066,8 @@ private Expression propagate(And and) { compare < 0 || // eq matches the boundary but should not be included (compare == 0 && !range.includeUpper())) - ) { - return FALSE; + ) { + return new Literal(and.source(), Boolean.FALSE, DataType.BOOLEAN); } } @@ -1060,9 +1076,192 @@ private Expression propagate(And and) { changed = true; } } + + // evaluate all NotEquals against the Equal + for (Iterator iter = notEquals.iterator(); iter.hasNext(); ) { + NotEquals neq = iter.next(); + if (eq.left().semanticEquals(neq.left())) { + Integer comp = BinaryComparison.compare(eqValue, neq.right().fold()); + if (comp != null) { + if (comp == 0) { + return FALSE; // clashing and conflicting: a = 1 AND a != 1 + } else { + iter.remove(); // clashing and redundant: a = 1 AND a != 2 + changed = true; + } + } + } + } + + // evaluate all inequalities against the Equal + for (Iterator iter = 
inequalities.iterator(); iter.hasNext(); ) { + BinaryComparison bc = iter.next(); + if (eq.left().semanticEquals(bc.left())) { + Integer compare = BinaryComparison.compare(eqValue, bc.right().fold()); + if (compare != null) { + if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { // a = 2 AND a </<= ? + if ((compare == 0 && bc instanceof LessThan) || // a = 2 AND a < 2 + 0 < compare) { // a = 2 AND a </<= 1 + return FALSE; + } + } else if (bc instanceof GreaterThan || bc instanceof GreaterThanOrEqual) { // a = 2 AND a >/>= ? + if ((compare == 0 && bc instanceof GreaterThan) || // a = 2 AND a > 2 + compare < 0) { // a = 2 AND a >/>= 3 + return FALSE; + } + } + + iter.remove(); + changed = true; + } + } + } + + return changed ? Predicates.combineAnd(CollectionUtils.combine(exps, equals, notEquals, inequalities, ranges)) : and; + } + + // combine disjunction: + // a = 2 OR a > 3 -> nop; a = 2 OR a > 1 -> a > 1 + // a = 2 OR a < 3 -> a < 3; a = 2 OR a < 1 -> nop + // a = 2 OR 3 < a < 5 -> nop; a = 2 OR 1 < a < 3 -> 1 < a < 3; a = 2 OR 0 < a < 1 -> nop + // a = 2 OR a != 2 -> TRUE; a = 2 OR a = 5 -> nop; a = 2 OR a != 5 -> a != 5 + private Expression propagate(Or or) { + List exps = new ArrayList<>(); + List equals = new ArrayList<>(); // foldable right term Equals + List notEquals = new ArrayList<>(); // foldable right term NotEquals + List ranges = new ArrayList<>(); + List inequalities = new ArrayList<>(); // foldable right term (=limit) BinaryComparision + + // split expressions by type + for (Expression ex : Predicates.splitOr(or)) { + if (ex instanceof Equals) { + Equals eq = (Equals) ex; + if (eq.right().foldable()) { + equals.add(eq); + } else { + exps.add(ex); + } + } else if (ex instanceof NotEquals) { + NotEquals neq = (NotEquals) ex; + if (neq.right().foldable()) { + notEquals.add(neq); + } else { + exps.add(ex); + } + } else if (ex instanceof Range) { + ranges.add((Range) ex); + } else if (ex instanceof BinaryComparison) { + BinaryComparison bc = (BinaryComparison) ex; + if (bc.right().foldable()) { + inequalities.add(bc); + } else { + exps.add(ex); + } + } else { + exps.add(ex); + } + } + + boolean updated = false; // has the expression been modified? + + // evaluate the impact of each Equal over the different types of Expressions + for (Iterator iterEq = equals.iterator(); iterEq.hasNext(); ) { + Equals eq = iterEq.next(); + Object eqValue = eq.right().fold(); + boolean removeEquals = false; + + // Equals OR NotEquals + for (NotEquals neq : notEquals) { + if (eq.left().semanticEquals(neq.left())) { // a = 2 OR a != ? -> ... + Integer comp = BinaryComparison.compare(eqValue, neq.right().fold()); + if (comp != null) { + if (comp == 0) { // a = 2 OR a != 2 -> TRUE + return TRUE; + } else { // a = 2 OR a != 5 -> a != 5 + removeEquals = true; + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + continue; + } + + // Equals OR Range + for (int i = 0; i < ranges.size(); i ++) { // might modify list, so use index loop + Range range = ranges.get(i); + if (eq.left().semanticEquals(range.value())) { + Integer lowerComp = range.lower().foldable() ? BinaryComparison.compare(eqValue, range.lower().fold()) : null; + Integer upperComp = range.upper().foldable() ? BinaryComparison.compare(eqValue, range.upper().fold()) : null; + + if (lowerComp != null && lowerComp == 0) { + if (!range.includeLower()) { // a = 2 OR 2 < a < ? -> 2 <= a < ? + ranges.set(i, new Range(range.source(), range.value(), range.lower(), true, + range.upper(), range.includeUpper())); + } // else : a = 2 OR 2 <= a < ? -> 2 <= a < ?
+ removeEquals = true; // update range with lower equality instead or simply superfluous + break; + } else if (upperComp != null && upperComp == 0) { + if (!range.includeUpper()) { // a = 2 OR ? < a < 2 -> ? < a <= 2 + ranges.set(i, new Range(range.source(), range.value(), range.lower(), range.includeLower(), + range.upper(), true)); + } // else : a = 2 OR ? < a <= 2 -> ? < a <= 2 + removeEquals = true; // update range with upper equality instead + break; + } else if (lowerComp != null && upperComp != null) { + if (0 < lowerComp && upperComp < 0) { // a = 2 OR 1 < a < 3 + removeEquals = true; // equality is superfluous + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + continue; + } + + // Equals OR Inequality + for (int i = 0; i < inequalities.size(); i ++) { + BinaryComparison bc = inequalities.get(i); + if (eq.left().semanticEquals(bc.left())) { + Integer comp = BinaryComparison.compare(eqValue, bc.right().fold()); + if (comp != null) { + if (bc instanceof GreaterThan || bc instanceof GreaterThanOrEqual) { + if (comp < 0) { // a = 1 OR a > 2 -> nop + continue; + } else if (comp == 0 && bc instanceof GreaterThan) { // a = 2 OR a > 2 -> a >= 2 + inequalities.set(i, new GreaterThanOrEqual(bc.source(), bc.left(), bc.right())); + } // else (0 < comp || bc instanceof GreaterThanOrEqual) : + // a = 3 OR a > 2 -> a > 2; a = 2 OR a => 2 -> a => 2 + + removeEquals = true; // update range with equality instead or simply superfluous + break; + } else if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { + if (comp > 0) { // a = 2 OR a < 1 -> nop + continue; + } + if (comp == 0 && bc instanceof LessThan) { // a = 2 OR a < 2 -> a <= 2 + inequalities.set(i, new LessThanOrEqual(bc.source(), bc.left(), bc.right())); + } // else (comp < 0 || bc instanceof LessThanOrEqual) : a = 2 OR a < 3 -> a < 3; a = 2 OR a <= 2 -> a <= 2 + removeEquals = true; // update range with equality instead or simply superfluous + break; + } + } + } + } + if (removeEquals) { + iterEq.remove(); + updated = true; + } } - return changed ? Predicates.combineAnd(CollectionUtils.combine(exps, equals, ranges)) : and; + return updated ? 
Predicates.combineOr(CollectionUtils.combine(exps, equals, notEquals, inequalities, ranges)) : or; } } @@ -1751,7 +1950,7 @@ private static Expression foldBinaryLogic(Expression expression) { boolean nullLeft = Expressions.isNull(or.left()); boolean nullRight = Expressions.isNull(or.right()); if (nullLeft && nullRight) { - return Literal.NULL; + return new Literal(expression.source(), null, DataType.NULL); } if (nullLeft) { return or.right(); @@ -1763,7 +1962,7 @@ private static Expression foldBinaryLogic(Expression expression) { if (expression instanceof And) { And and = (And) expression; if (Expressions.isNull(and.left()) || Expressions.isNull(and.right())) { - return Literal.NULL; + return new Literal(expression.source(), null, DataType.NULL); } } return expression; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index fd7ab2990da16..1141d665e87bf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.xpack.ql.expression.literal.Literals; import org.elasticsearch.xpack.ql.util.StringUtils; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; @@ -20,6 +19,7 @@ import org.elasticsearch.xpack.sql.execution.search.extractor.SqlBucketExtractors; import org.elasticsearch.xpack.sql.execution.search.extractor.SqlHitExtractors; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.literal.SqlLiterals; import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; import java.io.IOException; @@ -57,7 +57,7 @@ public static List getNamedWriteables() { entries.addAll(SqlBucketExtractors.getNamedWriteables()); // and custom types - entries.addAll(Literals.getNamedWriteables()); + entries.addAll(SqlLiterals.getNamedWriteables()); return entries; } diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 771b5aa9046e1..b38620c033c96 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -82,30 +82,30 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS Number round(Number, Number) Number truncate(Number, Number) - Double abs(Number) - Double acos(Number) - Double asin(Number) - Double atan(Number) - Double cbrt(Number) - Double ceil(Number) - Double cos(Number) - Double cosh(Number) - Double cot(Number) - Double degrees(Number) - Double e(Number) - Double exp(Number) - Double expm1(Number) - Double floor(Number) - Double log(Number) - Double log10(Number) - Double pi(Number) - Double radians(Number) - Double random(Number) - Double sign(Number) - Double sin(Number) - Double sinh(Number) - Double sqrt(Number) - Double tan(Number) + Number abs(Number) + Number acos(Number) + Number asin(Number) + Number atan(Number) + Number cbrt(Number) + Number ceil(Number) + Number cos(Number) + Number 
cosh(Number) + Number cot(Number) + Number degrees(Number) + Number e(Number) + Number exp(Number) + Number expm1(Number) + Number floor(Number) + Number log(Number) + Number log10(Number) + Number pi(Number) + Number radians(Number) + Number random(Number) + Number sign(Number) + Number sin(Number) + Number sinh(Number) + Number sqrt(Number) + Number tan(Number) # # Date/Time functions diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java index c46be2d9a8448..7e4758c6d95ff 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ql.expression.gen.processor.Processor; @@ -22,10 +21,9 @@ import static java.util.stream.Collectors.toCollection; -@AwaitsFix(bugUrl = "classpath inside tests is different") public class ProcessorTests extends ESTestCase { - private static List> processors; + private static Set> processors; @BeforeClass public static void init() throws Exception { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathOperationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathOperationTests.java new file mode 100644 index 0000000000000..3c1c880a8b457 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathOperationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
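The MathOperationTests added just below exercise ABS on boundary values: in two's-complement arithmetic Long.MIN_VALUE and Integer.MIN_VALUE have no positive counterpart, so negating them overflows and the operation is expected to fail loudly rather than return a negative "absolute" value. The following is a minimal standalone sketch of an overflow-checked abs for long values; it is illustrative only, not the plugin's MathProcessor implementation, and the exception type and message are assumptions.

public final class CheckedAbs {
    // Math.abs(Long.MIN_VALUE) silently returns Long.MIN_VALUE (still negative),
    // so check explicitly and throw instead of propagating a wrong result.
    static long abs(long value) {
        if (value == Long.MIN_VALUE) {
            throw new ArithmeticException("[" + value + "] cannot be negated"); // hypothetical message
        }
        return value < 0 ? -value : value;
    }

    public static void main(String[] args) {
        System.out.println(abs(-42L)); // prints 42
        try {
            abs(Long.MIN_VALUE);
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // overflow is reported rather than returned as a negative value
        }
    }
}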
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; + +public class MathOperationTests extends ESTestCase { + + public void testAbsLongMax() { + QlIllegalArgumentException ex = expectThrows(QlIllegalArgumentException.class, () -> MathOperation.ABS.apply(Long.MIN_VALUE)); + assertTrue(ex.getMessage().contains("cannot be negated")); + } + + public void testAbsIntegerMax() { + QlIllegalArgumentException ex = expectThrows(QlIllegalArgumentException.class, () -> MathOperation.ABS.apply(Integer.MIN_VALUE)); + assertTrue(ex.getMessage().contains("cannot be negated")); + } + + public void testAbsShortMax() { + QlIllegalArgumentException ex = expectThrows(QlIllegalArgumentException.class, () -> MathOperation.ABS.apply(Short.MIN_VALUE)); + assertTrue(ex.getMessage().contains("out of")); + } + + public void testAbsPreservesType() { + assertEquals((byte) 42, MathOperation.ABS.apply((byte) -42)); + assertEquals((short) 42, MathOperation.ABS.apply((short) -42)); + assertEquals(42, MathOperation.ABS.apply(-42)); + assertEquals((long) 42, MathOperation.ABS.apply((long) -42)); + assertEquals(42f, MathOperation.ABS.apply(-42f)); + assertEquals(42d, MathOperation.ABS.apply(-42d)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index 349c644fb43f7..860b759bb8f3f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.ql.expression.function.aggregate.InnerAggregate; import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.Range; import org.elasticsearch.xpack.ql.expression.predicate.conditional.ArbitraryConditionalFunction; import org.elasticsearch.xpack.ql.expression.predicate.conditional.Case; @@ -1550,6 +1551,251 @@ public void testEliminateRangeByNullEqualsOutsideInterval() { assertEquals(FALSE, rule.rule(exp)); } + // a != 3 AND a = 3 -> FALSE + public void testPropagateEquals_VarNeq3AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = new NotEquals(EMPTY, fa, THREE); + Equals eq = new Equals(EMPTY, fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(FALSE, rule.rule(exp)); + } + + // a != 4 AND a = 3 -> a = 3 + public void testPropagateEquals_VarNeq4AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = new NotEquals(EMPTY, fa, FOUR); + Equals eq = new Equals(EMPTY, fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(Equals.class, exp.getClass()); + assertEquals(eq, rule.rule(exp)); + } + + // a = 2 AND a < 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + LessThan lt = new LessThan(EMPTY, fa, TWO); + + PropagateEquals rule 
= new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a <= 2 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + LessThanOrEqual lt = new LessThanOrEqual(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(eq, exp); + } + + // a = 2 AND a <= 1 -> FALSE + public void testPropagateEquals_VarEq2AndVarLte1() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + LessThanOrEqual lt = new LessThanOrEqual(EMPTY, fa, ONE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a > 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarGt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + GreaterThan gt = new GreaterThan(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a >= 2 -> a = 2 + public void testPropagateEquals_VarEq2AndVarGte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gte)); + assertEquals(eq, exp); + } + + // a = 2 AND a > 3 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + GreaterThan gt = new GreaterThan(EMPTY, fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a < 3 AND a > 1 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLt3AndVarGt1AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + LessThan lt = new LessThan(EMPTY, fa, THREE); + GreaterThan gt = new GreaterThan(EMPTY, fa, ONE); + NotEquals neq = new NotEquals(EMPTY, fa, FOUR); + + PropagateEquals rule = new PropagateEquals(); + Expression and = Predicates.combineAnd(Arrays.asList(eq, lt, gt, neq)); + Expression exp = rule.rule(and); + assertEquals(eq, exp); + } + + // a = 2 AND 1 < a < 3 AND a > 0 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarRangeGt1Lt3AndVarGt0AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + Range range = new Range(EMPTY, fa, ONE, false, THREE, false); + GreaterThan gt = new GreaterThan(EMPTY, fa, L(0)); + NotEquals neq = new NotEquals(EMPTY, fa, FOUR); + + PropagateEquals rule = new PropagateEquals(); + Expression and = Predicates.combineAnd(Arrays.asList(eq, range, gt, neq)); + Expression exp = rule.rule(and); + assertEquals(eq, exp); + } + + // a = 2 OR a > 1 -> a > 1 + public void testPropagateEquals_VarEq2OrVarGt1() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + GreaterThan gt = new GreaterThan(EMPTY, fa, ONE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(gt, exp); + } + + // a = 2 OR a > 2 -> a >= 2 + public void testPropagateEquals_VarEq2OrVarGte2() { + FieldAttribute fa = getFieldAttribute(); + 
Equals eq = new Equals(EMPTY, fa, TWO); + GreaterThan gt = new GreaterThan(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(GreaterThanOrEqual.class, exp.getClass()); + GreaterThanOrEqual gte = (GreaterThanOrEqual) exp; + assertEquals(TWO, gte.right()); + } + + // a = 2 OR a < 3 -> a < 3 + public void testPropagateEquals_VarEq2OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + LessThan lt = new LessThan(EMPTY, fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(lt, exp); + } + + // a = 3 OR a < 3 -> a <= 3 + public void testPropagateEquals_VarEq3OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, THREE); + LessThan lt = new LessThan(EMPTY, fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(LessThanOrEqual.class, exp.getClass()); + LessThanOrEqual lte = (LessThanOrEqual) exp; + assertEquals(THREE, lte.right()); + } + + // a = 2 OR 1 < a < 3 -> 1 < a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt1Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + Range range = new Range(EMPTY, fa, ONE, false, THREE, false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(range, exp); + } + + // a = 2 OR 2 < a < 3 -> 2 <= a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + Range range = new Range(EMPTY, fa, TWO, false, THREE, false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertTrue(r.includeLower()); + assertEquals(THREE, r.upper()); + assertFalse(r.includeUpper()); + } + + // a = 3 OR 2 < a < 3 -> 2 < a <= 3 + public void testPropagateEquals_VarEq3OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, THREE); + Range range = new Range(EMPTY, fa, TWO, false, THREE, false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertFalse(r.includeLower()); + assertEquals(THREE, r.upper()); + assertTrue(r.includeUpper()); + } + + // a = 2 OR a != 2 -> TRUE + public void testPropagateEquals_VarEq2OrVarNeq2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + NotEquals neq = new NotEquals(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(TRUE, exp); + } + + // a = 2 OR a != 5 -> a != 5 + public void testPropagateEquals_VarEq2OrVarNeq5() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + NotEquals neq = new NotEquals(EMPTY, fa, FIVE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(NotEquals.class, exp.getClass()); + NotEquals ne = (NotEquals) exp; + assertEquals(ne.right(), FIVE); + } + + // a = 2 OR 3 < a < 4 OR a > 2 OR a!= 2 -> TRUE + public void 
testPropagateEquals_VarEq2OrVarRangeGt3Lt4OrVarGt2OrVarNe2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = new Equals(EMPTY, fa, TWO); + Range range = new Range(EMPTY, fa, THREE, false, FOUR, false); + GreaterThan gt = new GreaterThan(EMPTY, fa, TWO); + NotEquals neq = new NotEquals(EMPTY, fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(Predicates.combineOr(Arrays.asList(eq, range, neq, gt))); + assertEquals(TRUE, exp); + } + public void testTranslateMinToFirst() { Min min1 = new Min(EMPTY, new FieldAttribute(EMPTY, "str", new EsField("str", DataType.KEYWORD, emptyMap(), true))); Min min2 = new Min(EMPTY, getFieldAttribute()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 5037b74885d80..1c11ee3a4460c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -249,15 +249,15 @@ public void testDateRangeWithCurrentTimestamp() { public void testDateRangeWithCurrentDate() { testDateRangeWithCurrentFunctions("CURRENT_DATE()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now())); testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_DATE()", DATE_FORMAT, - DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)), - DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L))); + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(1L)).minusSeconds(1), + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L)).plusSeconds(1)); } public void testDateRangeWithToday() { testDateRangeWithCurrentFunctions("TODAY()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now())); testDateRangeWithCurrentFunctions_AndRangeOptimization("TODAY()", DATE_FORMAT, - DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)), - DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L))); + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(1L)).minusSeconds(1), + DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L)).plusSeconds(1)); } public void testDateRangeWithNow() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/SqlNodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/SqlNodeSubclassTests.java index 56576198173a9..4937b618cf35c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/SqlNodeSubclassTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/SqlNodeSubclassTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.tree; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.LiteralTests; @@ -48,7 +47,6 @@ * node of that type is called for. 
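The PropagateEquals tests above all follow the same pattern: build the predicate tree, run the rule, and assert the simplified form. The disjunction cases boil down to interval reasoning on the folded literal values. Below is a minimal standalone sketch of that reasoning for the "equality OR greater-than" case; the names and string results are illustrative only and are not the optimizer's Expression types.

public final class EqOrGtSketch {
    // "a = v OR a > limit" on the same attribute:
    //   v == limit -> "a >= limit"     (a = 2 OR a > 2 -> a >= 2)
    //   v >  limit -> "a > limit"      (a = 3 OR a > 2 -> a > 2, the equality is superfluous)
    //   v <  limit -> left untouched   (a = 1 OR a > 2 -> cannot be merged)
    static String simplify(int v, int limit) {
        if (v == limit) {
            return "a >= " + limit;
        }
        if (v > limit) {
            return "a > " + limit;
        }
        return "a = " + v + " OR a > " + limit;
    }

    public static void main(String[] args) {
        System.out.println(simplify(2, 2)); // a >= 2
        System.out.println(simplify(3, 2)); // a > 2
        System.out.println(simplify(1, 2)); // a = 1 OR a > 2
    }
}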
* */ -@AwaitsFix(bugUrl = "classpath inside tests is different") public class SqlNodeSubclassTests> extends NodeSubclassTests { private static final List> CLASSES_WITH_MIN_TWO_CHILDREN = CollectionUtils.combine( diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.put_trained_model.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.put_trained_model.json new file mode 100644 index 0000000000000..a58fa13540748 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.put_trained_model.json @@ -0,0 +1,28 @@ +{ + "ml.put_trained_model":{ + "documentation":{ + "url":"TODO" + }, + "stability":"experimental", + "url":{ + "paths":[ + { + "path":"/_ml/inference/{model_id}", + "methods":[ + "PUT" + ], + "parts":{ + "model_id":{ + "type":"string", + "description":"The ID of the trained models to store" + } + } + } + ] + }, + "body": { + "description":"The trained model configuration", + "required":true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/11_token.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/11_token.yml new file mode 100644 index 0000000000000..fcbbb1c257bbe --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/11_token.yml @@ -0,0 +1,67 @@ +--- +setup: + - skip: + features: headers + - do: + cluster.health: + wait_for_status: yellow + - do: + security.put_user: + username: "token_joe" + body: > + { + "password": "s3krit", + "roles" : [ "token_admin" ] + } + - do: + security.put_role: + name: "token_admin" + body: > + { + "cluster": ["manage_token"], + "indices": [ + { + "names": "*", + "privileges": ["all"] + } + ] + } +--- +teardown: + - do: + security.delete_user: + username: "token_joe" + ignore: 404 + - do: + security.delete_role: + name: "token_admin" + ignore: 404 + +--- +"Test user changing their password authenticating with token not allowed": + + - do: + headers: + Authorization: "Basic dG9rZW5fam9lOnMza3JpdA==" + security.get_token: + body: + grant_type: "password" + username: "token_joe" + password: "s3krit" + + - match: { type: "Bearer" } + - is_true: access_token + - set: { access_token: token } + - match: { expires_in: 1200 } + - is_false: scope + + - do: + headers: + Authorization: Bearer ${token} + catch: forbidden + security.change_password: + username: "joe" + body: > + { + "password" : "s3krit2" + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/flattened/20_flattened_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/flattened/20_flattened_stats.yml index a2a670769976a..88c32d76b0b7b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/flattened/20_flattened_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/flattened/20_flattened_stats.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 7.99.99" - reason: "telemetry for flattened fields was added in 8.0" + version: " - 7.5.99" + reason: "telemetry for flattened fields was added in 7.6.0" --- "Usage stats for flattened fields": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml index 0a3b2bc135b57..8b24d73fcb476 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml @@ -20,8 +20,8 @@ teardown: - do: license.get: {} - ## a license object has 11 attributes - - length: { license: 
11 } + ## a v5 license object has 12 attributes & the Rest API always outputs in current version + - length: { license: 12 } ## bwc for licenses format - do: @@ -35,7 +35,7 @@ teardown: - do: license.get: {} - - length: { license: 11 } + - length: { license: 12 } ## license version: 1.x - do: @@ -49,7 +49,7 @@ teardown: - do: license.get: {} - - length: { license: 11 } + - length: { license: 12 } ## multiple licenses version: 1.x - do: @@ -63,7 +63,7 @@ teardown: - do: license.get: {} - - length: { license: 11 } + - length: { license: 12 } - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } --- "Should throw 404 after license deletion": @@ -91,7 +91,7 @@ teardown: - do: license.get: {} - - length: { license: 11 } + - length: { license: 12 } --- "Cannot start basic": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/30_enterprise_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/30_enterprise_license.yml index 5f4582c747b5b..9b0fd906f2930 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/30_enterprise_license.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/30_enterprise_license.yml @@ -15,18 +15,20 @@ teardown: license.post: acknowledge: true body: | - {"license":{"uid":"6e57906b-a8d1-4c1f-acb7-73a16edc3934","type":"enterprise","issue_date_in_millis":1523456691721,"expiry_date_in_millis":1838816691721,"max_nodes":50,"issued_to":"rest-test","issuer":"elasticsearch","signature":"AAAABAAAAA03e8BZRVXaCV4CpPGRAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAAZNhjABV6PRfa7P7sJgn70XCGoKtAVT75yU13JvKBd/UjD4TPhuZcztqZ/tcLEPxm/TSvGlogWmnw/Rw8xs8jMpBpKsJ+LOXjHhDdvXb2y7JJhCH8nlSEblMDRXysNvWpKe60Z/hb7hS4JynEUt0EBb6ji7BL42O07PNll1EGmkfsHazfs46iV91BG1VxXksI78XgWSaA0F/h7tvrNW9PTgsUaLo06InlQ8jA1dal90AoXp+MVDOHWQjVFZzUnO87/7lEb+VXt0IwchaW17ahihJqkCtGvKpWFwpuhx9xiFvkySN/g5LIVjYCvgBkiWExQ9p0Zzg3VoSlMBnVy0BWo=","start_date_in_millis":-1}} + {"license":{"uid":"6e57906b-a8d1-4c1f-acb7-73a16edc3934","type":"enterprise","issue_date_in_millis":1523456691721,"expiry_date_in_millis":1838816691721,"max_nodes":null,"max_resource_units":50,"issued_to":"rest-test","issuer":"elasticsearch","signature":"AAAABQAAAA0sKPJdf9T6DItbXVJKAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAKFCHrix7w/xPG14+wdhld1RmphDmXmHfL1xeuI33Ahr1mOUYZ30eR6GZuh7CnK8BQhfq+z63lgctJepWlvwDSgkOvXWLHrJun7YSCrzz1bism0ZHWw7Swb9DO7vePomVBo/Hm9+eX0pV4/cFQNMmbFaX11tqJZYBEO6sNASVAFL7A1ZcVoB2evweGU9pUQYvFvmyzzySf99miDo3NH0XYdownEdtoNgFfmqa3+koCP7onmRZ1h9jhsDOi30RX/DTDXQKW+XoREnOHCoOAJFxwip/c1qaQAOqp1H6+P20ZGr2sIPiU97OVEU9kulm+E+jgiVW3LwGheOXsUOd1B8Mp0=","start_date_in_millis":-1}} - match: { license_status: "valid" } - do: license.get: {} - ## a license object has 11 attributes - - length: { license: 11 } + ## a v5 (enterprise) license object has 12 attributes + - length: { license: 12 } ## In 8.0, the enterprise license is always reports truthfully - match: { license.type: "enterprise" } + - match: { license.max_resource_units: 50 } + - match: { license.max_nodes: null } - do: warnings: @@ -34,8 +36,8 @@ teardown: license.get: accept_enterprise: "true" - ## a license object has 11 attributes - - length: { license: 11 } + ## a v5 license object has 12 attributes + - length: { license: 12 } ## Always returns real type - match: { license.type: "enterprise" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml index 
6dc8c800bd414..afd6701af145c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml @@ -87,7 +87,7 @@ setup: --- "Test put config with security headers in the body": - do: - catch: /unknown field \[headers\], parser not found/ + catch: /unknown field \[headers\]/ ml.put_data_frame_analytics: id: "data_frame_with_header" body: > @@ -107,7 +107,7 @@ setup: "Test put config with create_time in the body": - do: - catch: /unknown field \[create_time\], parser not found/ + catch: /unknown field \[create_time\]/ ml.put_data_frame_analytics: id: "data_frame_with_create_time" body: > @@ -126,7 +126,7 @@ setup: "Test put config with version in the body": - do: - catch: /unknown field \[version\], parser not found/ + catch: /unknown field \[version\]/ ml.put_data_frame_analytics: id: "data_frame_with_version" body: > @@ -272,7 +272,7 @@ setup: "Test put config given dest index contains uppercase chars": - do: - catch: /.*reason=Validation Failed.* Destination index \[Foo\] must be lowercase;.*/ + catch: /.*Validation Failed.* Destination index \[Foo\] must be lowercase;.*/ ml.put_data_frame_analytics: id: "config-given-dest-index-uppercase" body: > @@ -308,7 +308,7 @@ setup: "Test put config with missing concrete source index": - do: - catch: /.*reason=Validation Failed.* no such index \[missing\]/ + catch: /.*Validation Failed.* no such index \[missing\]/ ml.put_data_frame_analytics: id: "config-with-missing-concrete-source-index" body: > @@ -344,7 +344,7 @@ setup: "Test put config with dest index same as source index": - do: - catch: /.*reason=Validation Failed.* Destination index \[index-source\] is included in source expression \[index-source\]/ + catch: /.*Validation Failed.* Destination index \[index-source\] is included in source expression \[index-source\]/ ml.put_data_frame_analytics: id: "config-with-same-source-dest-index" body: > @@ -380,7 +380,7 @@ setup: name: multiple-dest-index - do: - catch: /.*reason=Validation Failed.* no write index is defined for alias \[multiple-dest-index\].*/ + catch: /.*Validation Failed.* no write index is defined for alias \[multiple-dest-index\].*/ ml.put_data_frame_analytics: id: "config-with-dest-index-matching-multiple-indices" body: > @@ -407,7 +407,7 @@ setup: name: dest-alias - do: - catch: /.*reason=Validation Failed.* Destination index \[another-source-index\] is included in source expression \[another-source-index\]/ + catch: /.*Validation Failed.* Destination index \[another-source-index\] is included in source expression \[another-source-index\]/ ml.put_data_frame_analytics: id: "config-with-dest-index-included-in-source-via-alias" body: > @@ -425,7 +425,7 @@ setup: "Test put config with remote source index": - do: - catch: /.*reason=Validation Failed.* remote source indices are not supported/ + catch: /.*Validation Failed.* remote source indices are not supported/ ml.put_data_frame_analytics: id: "config-with-missing-concrete-source-index" body: > @@ -443,7 +443,7 @@ setup: "Test put config with unknown top level field": - do: - catch: /unknown field \[unknown_field\], parser not found/ + catch: /unknown field \[unknown_field\]/ ml.put_data_frame_analytics: id: "unknown_field" body: > diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index 8c6e94635f4da..c63aa4b4e48d4 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -86,7 +86,7 @@ setup: --- "Test put datafeed with security headers in the body": - do: - catch: /unknown field \[headers\], parser not found/ + catch: /unknown field \[headers\]/ ml.put_datafeed: datafeed_id: test-datafeed-1 body: > diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/inference_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/inference_crud.yml index f72fd1120d81e..f5f9a56bab8d8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/inference_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/inference_crud.yml @@ -1,3 +1,74 @@ +setup: + - skip: + features: headers + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_trained_model: + model_id: a-regression-model-0 + body: > + { + "description": "empty model for tests", + "input": {"field_names": ["field1", "field2"]}, + "definition": { + "preprocessors": [], + "trained_model": { + "tree": { + "feature_names": ["field1", "field2"], + "tree_structure": [ + {"node_index": 0, "leaf_value": 1} + ], + "target_type": "regression" + } + } + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_trained_model: + model_id: a-regression-model-1 + body: > + { + "description": "empty model for tests", + "input": {"field_names": ["field1", "field2"]}, + "definition": { + "preprocessors": [], + "trained_model": { + "tree": { + "feature_names": ["field1", "field2"], + "tree_structure": [ + {"node_index": 0, "leaf_value": 1} + ], + "target_type": "regression" + } + } + } + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + ml.put_trained_model: + model_id: a-classification-model + body: > + { + "description": "empty model for tests", + "input": {"field_names": ["field1", "field2"]}, + "definition": { + "preprocessors": [], + "trained_model": { + "tree": { + "feature_names": ["field1", "field2"], + "tree_structure": [ + {"node_index": 0, "leaf_value": 1} + ], + "target_type": "classification", + "classification_labels": ["no", "yes"] + } + } + } + } --- "Test get given missing trained model": @@ -24,56 +95,52 @@ - match: { count: 0 } - match: { trained_model_configs: [] } --- -"Test delete given unused trained model": +"Test get models": + - do: + ml.get_trained_models: + model_id: "*" + - match: { count: 4 } + - match: { trained_model_configs.0.model_id: "a-classification-model" } + - match: { trained_model_configs.1.model_id: "a-regression-model-0" } + - match: { trained_model_configs.2.model_id: "a-regression-model-1" } - do: - index: - id: trained_model_config-unused-regression-model-0 - index: .ml-inference-000001 - body: > - { - "model_id": "unused-regression-model", - "created_by": "ml_tests", - "version": "8.0.0", - "description": "empty model for tests", - "create_time": 0, - "model_version": 0, - "model_type": "local" - } + ml.get_trained_models: + model_id: "a-regression*" + - match: { count: 2 } + - match: { trained_model_configs.0.model_id: "a-regression-model-0" } + - match: { trained_model_configs.1.model_id: "a-regression-model-1" } + - do: - indices.refresh: {} + ml.get_trained_models: + model_id: "*" + from: 0 + size: 2 + - match: { count: 4 } + - match: { trained_model_configs.0.model_id: "a-classification-model" } + - match: { trained_model_configs.1.model_id: "a-regression-model-0" } + - do: + ml.get_trained_models: + model_id: "*" + from: 1 + size: 1 + - match: { count: 4 } + - match: { trained_model_configs.0.model_id: "a-regression-model-0" } +--- +"Test delete given unused trained model": - do: ml.delete_trained_model: - model_id: "unused-regression-model" + model_id: "a-classification-model" - match: { acknowledged: true } - --- "Test delete with missing model": - do: catch: missing ml.delete_trained_model: model_id: "missing-trained-model" - --- "Test delete given used trained model": - - do: - index: - id: trained_model_config-used-regression-model-0 - index: .ml-inference-000001 - body: > - { - "model_id": "used-regression-model", - "created_by": "ml_tests", - "version": "8.0.0", - "description": "empty model for tests", - "create_time": 0, - "model_version": 0, - "model_type": "local" - } - - do: - indices.refresh: {} - - do: ingest.put_pipeline: id: "regression-model-pipeline" @@ -82,7 +149,7 @@ "processors": [ { "inference" : { - "model_id" : "used-regression-model", + "model_id" : "a-regression-model-0", "inference_config": {"regression": {}}, "target_field": "regression_field", "field_mappings": {} @@ -95,12 +162,12 @@ - do: catch: conflict ml.delete_trained_model: - model_id: "used-regression-model" + model_id: "a-regression-model-0" --- "Test get pre-packaged trained models": - do: ml.get_trained_models: - model_id: "_all" + model_id: "lang_ident_model_1" allow_no_match: false - match: { count: 1 } - match: { trained_model_configs.0.model_id: "lang_ident_model_1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml index 8cae555a201f5..1bac227066204 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_data_frame_analytics.yml @@ -31,7 +31,7 @@ index: missing - do: - catch: /.*reason=Validation Failed.* no such index \[missing\]/ + catch: /.*Validation Failed.* no such index \[missing\]/ ml.start_data_frame_analytics: id: "missing_index" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 27b637ba7081d..530bf6414bbed 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -170,7 +170,7 @@ setup: "Try to include headers": - do: - catch: /unknown field \[headers\], parser not found/ + catch: /unknown field \[headers\]/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser rollup.put_job: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/preview_transforms.yml index cf139b04b44c5..7abb83834f515 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/preview_transforms.yml @@ -166,7 +166,7 @@ setup: --- "Test preview with non-existing source index": - do: - catch: /.*reason=Validation Failed.* no such index \[does_not_exist\]/ + catch: /.*Validation Failed.* no such index \[does_not_exist\]/ transform.preview_transform: body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_crud.yml index b8dc88c3ae0c1..e66982aadece1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_crud.yml @@ -79,7 +79,7 @@ setup: --- "Test put transform with invalid source index": - do: - catch: /.*reason=Validation Failed.* no such index \[missing-index\]/ + catch: /.*Validation Failed.* no such index \[missing-index\]/ transform.put_transform: transform_id: "missing-source-transform" body: > @@ -400,7 +400,7 @@ setup: name: source-index - do: - catch: /.*reason=Validation Failed.* Destination index \[created-destination-index\] is included in source expression \[airline-data,created-destination-index\]/ + catch: /.*Validation Failed.* Destination index \[created-destination-index\] is included in source expression \[airline-data,created-destination-index\]/ transform.put_transform: transform_id: "transform-from-aliases-failures" body: > @@ -426,7 +426,7 @@ setup: name: dest-index - do: - catch: /.*reason=Validation Failed.* no write index is defined for alias [dest2-index].*/ + catch: /.*Validation Failed.* no write index is defined for alias [dest2-index].*/ transform.put_transform: transform_id: "airline-transform" body: > @@ -537,7 +537,7 @@ setup: --- "Test invalid destination index name": - do: - catch: /.*reason=Validation Failed.* Destination index \[DeStInAtIoN\] must be lowercase/ + catch: /.*Validation Failed.* Destination index \[DeStInAtIoN\] must be lowercase/ transform.put_transform: transform_id: "airline-transform" body: > diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_update.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_update.yml index f55fcd2cb07e5..d82bc1bea9d0a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_update.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/transform/transforms_update.yml @@ -67,7 +67,7 @@ setup: --- "Test put transform with invalid source index": - do: - catch: /.*reason=Validation Failed.* no such index \[missing-index\]/ + catch: /.*Validation Failed.* no such index \[missing-index\]/ transform.update_transform: transform_id: "updating-airline-transform" body: > @@ -255,7 +255,7 @@ setup: name: source2-index - do: - catch: /.*reason=Validation Failed.* Destination index \[created-destination-index\] is included in source expression \[created-destination-index\]/ + catch: /.*Validation Failed.* Destination index \[created-destination-index\] is included in source expression \[created-destination-index\]/ transform.update_transform: transform_id: "updating-airline-transform" body: > @@ -280,7 +280,7 @@ setup: index: created-destination-index name: dest2-index - do: - catch: /.*reason=Validation Failed.* no write index is defined for alias [dest2-index].*/ + catch: /.*Validation Failed.* no write index is defined for alias [dest2-index].*/ transform.update_transform: transform_id: "updating-airline-transform" body: > @@ -290,7 +290,7 @@ setup: --- "Test invalid destination index name": - do: - catch: /.*reason=Validation Failed.* Destination index \[DeStInAtIoN\] must be lowercase/ + catch: /.*Validation Failed.* Destination index \[DeStInAtIoN\] must be lowercase/ transform.update_transform: transform_id: "updating-airline-transform" body: > @@ -298,7 +298,7 @@ setup: "dest": { "index": "DeStInAtIoN" } } - do: - catch: /.*reason=Validation Failed.* Invalid index name \[destination#dest\], must not contain \'#\'/ + catch: /.*Validation Failed.* Invalid index name \[destination#dest\], must not contain \'#\'/ transform.update_transform: transform_id: "updating-airline-transform" body: > diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml index d8b25a29531bc..4757ee00360e9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml @@ -166,4 +166,92 @@ - is_true: features.monitoring.available - is_false: tagline +--- +"Usage stats for mappings": + - do: + xpack.usage: {} + + - match: { index.mappings.field_types: [] } + + - do: + indices.create: + index: test-index1 + body: + mappings: + properties: + foo: + type: keyword + + - do: + indices.create: + index: test-index2 + body: + mappings: + properties: + foo: + type: keyword + bar: + properties: + quux: + type: integer + + - do: + xpack.usage: {} + + - match: { index.mappings.field_types: [ "integer", "keyword", "object" ] } + +--- +"Usage stats for analysis": + - do: + xpack.usage: {} + + - match: { index.analysis.char_filter_types: [] } + - match: { index.analysis.tokenizer_types: [] } + - match: { index.analysis.filter_types: [] } + - match: { index.analysis.analyzer_types: [] } + + - do: + indices.create: + index: test-index1 + body: + settings: + analysis: + char_filter: + c: + type: mapping + mappings: [ "a => b" ] + tokenizer: + tok: + type: pattern + pattern: "," + filter: + st: + type: stop + stopwords: [ "a" ] 
+ analyzer: + en: + type: standard + stopwords: "_english_" + cust: + char_filter: [ "html_strip" ] + tokenizer: "keyword" + filter: [ "trim" ] + mappings: + properties: + message: + type: "text" + analyzer: french + search_analyzer: spanish + search_quote_analyzer: german + + - do: + xpack.usage: {} + - match: { index.analysis.char_filter_types: [ "mapping" ] } + - match: { index.analysis.tokenizer_types: [ "pattern" ] } + - match: { index.analysis.filter_types: [ "stop" ] } + - match: { index.analysis.analyzer_types: [ "custom", "standard" ] } + - match: { index.analysis.built_in_char_filters: [ "html_strip" ] } + - match: { index.analysis.built_in_tokenizers: [ "keyword" ] } + - match: { index.analysis.built_in_filters: [ "trim" ] } + - match: { index.analysis.built_in_analyzers: [ "french", "german", "spanish" ] } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index b7d57595b232a..2f9bae329a813 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -244,7 +244,13 @@ public Collection createComponents( TransformConfigManager configManager = new IndexBasedTransformConfigManager(client, xContentRegistry); TransformAuditor auditor = new TransformAuditor(client, clusterService.getNodeName()); - TransformCheckpointService checkpointService = new TransformCheckpointService(client, configManager, auditor); + TransformCheckpointService checkpointService = new TransformCheckpointService( + client, + settings, + clusterService, + configManager, + auditor + ); SchedulerEngine scheduler = new SchedulerEngine(settings, Clock.systemUTC()); transformServices.set(new TransformServices(configManager, checkpointService, auditor, scheduler)); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java index a7bda6886fd58..63205fd44cae9 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java @@ -16,9 +16,11 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; @@ -26,16 +28,20 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.transform.checkpoint.RemoteClusterResolver.ResolvedIndices; import 
org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import java.time.Instant; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.stream.Collectors; public class DefaultCheckpointProvider implements CheckpointProvider { @@ -45,17 +51,20 @@ public class DefaultCheckpointProvider implements CheckpointProvider { private static final Logger logger = LogManager.getLogger(DefaultCheckpointProvider.class); protected final Client client; + protected final RemoteClusterResolver remoteClusterResolver; protected final TransformConfigManager transformConfigManager; protected final TransformAuditor transformAuditor; protected final TransformConfig transformConfig; public DefaultCheckpointProvider( final Client client, + final RemoteClusterResolver remoteClusterResolver, final TransformConfigManager transformConfigManager, final TransformAuditor transformAuditor, final TransformConfig transformConfig ) { this.client = client; + this.remoteClusterResolver = remoteClusterResolver; this.transformConfigManager = transformConfigManager; this.transformAuditor = transformAuditor; this.transformConfig = transformConfig; @@ -84,13 +93,61 @@ public void createNextCheckpoint(final TransformCheckpoint lastCheckpoint, final } protected void getIndexCheckpoints(ActionListener> listener) { + try { + ResolvedIndices resolvedIndexes = remoteClusterResolver.resolve(transformConfig.getSource().getIndex()); + ActionListener> groupedListener = listener; + + if (resolvedIndexes.numClusters() > 1) { + ActionListener>> mergeMapsListener = ActionListener.wrap(indexCheckpoints -> { + listener.onResponse( + indexCheckpoints.stream() + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue())) + ); + }, listener::onFailure); + + groupedListener = new GroupedActionListener<>(mergeMapsListener, resolvedIndexes.numClusters()); + } + + if (resolvedIndexes.getLocalIndices().isEmpty() == false) { + getCheckpointsFromOneCluster( + client, + transformConfig.getHeaders(), + resolvedIndexes.getLocalIndices().toArray(new String[0]), + RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, + groupedListener + ); + } + + for (Map.Entry> remoteIndex : resolvedIndexes.getRemoteIndicesPerClusterAlias().entrySet()) { + Client remoteClient = client.getRemoteClusterClient(remoteIndex.getKey()); + getCheckpointsFromOneCluster( + remoteClient, + transformConfig.getHeaders(), + remoteIndex.getValue().toArray(new String[0]), + remoteIndex.getKey() + RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR, + groupedListener + ); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + private static void getCheckpointsFromOneCluster( + Client client, + Map headers, + String[] indices, + String prefix, + ActionListener> listener + ) { // 1st get index to see the indexes the user has access to - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(transformConfig.getSource().getIndex()) + GetIndexRequest getIndexRequest = new GetIndexRequest().indices(indices) .features(new GetIndexRequest.Feature[0]) .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); ClientHelper.executeWithHeadersAsync( - transformConfig.getHeaders(), + headers, ClientHelper.TRANSFORM_ORIGIN, client, GetIndexAction.INSTANCE, @@ -104,23 +161,20 @@ protected void 
getIndexCheckpoints(ActionListener> listener) client, ClientHelper.TRANSFORM_ORIGIN, IndicesStatsAction.INSTANCE, - new IndicesStatsRequest().indices(transformConfig.getSource().getIndex()) - .clear() - .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN), + new IndicesStatsRequest().indices(indices).clear().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN), ActionListener.wrap(response -> { if (response.getFailedShards() != 0) { listener.onFailure(new CheckpointException("Source has [" + response.getFailedShards() + "] failed shards")); return; } - - listener.onResponse(extractIndexCheckPoints(response.getShards(), userIndices)); + listener.onResponse(extractIndexCheckPoints(response.getShards(), userIndices, prefix)); }, e -> listener.onFailure(new CheckpointException("Failed to create checkpoint", e))) ); }, e -> listener.onFailure(new CheckpointException("Failed to create checkpoint", e))) ); } - static Map extractIndexCheckPoints(ShardStats[] shards, Set userIndices) { + static Map extractIndexCheckPoints(ShardStats[] shards, Set userIndices, String prefix) { Map> checkpointsByIndex = new TreeMap<>(); for (ShardStats shard : shards) { @@ -129,9 +183,10 @@ static Map extractIndexCheckPoints(ShardStats[] shards, Set checkpoints = checkpointsByIndex.get(indexName); + TreeMap checkpoints = checkpointsByIndex.get(fullIndexName); // 1st time we see this shard for this index, add the entry for the shard // or there is already a checkpoint entry for this index/shard combination // but with a higher global checkpoint. This is by design(not a problem) and @@ -142,8 +197,8 @@ static Map extractIndexCheckPoints(ShardStats[] shards, Set()); - checkpointsByIndex.get(indexName).put(shard.getShardRouting().getId(), globalCheckpoint); + checkpointsByIndex.put(fullIndexName, new TreeMap<>()); + checkpointsByIndex.get(fullIndexName).put(shard.getShardRouting().getId(), globalCheckpoint); } } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/RemoteClusterResolver.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/RemoteClusterResolver.java new file mode 100644 index 0000000000000..34cc3d95f47af --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/RemoteClusterResolver.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.checkpoint; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteConnectionStrategy; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArraySet; + +/** + * Maintain a list of remote clusters (aliases) and provide the ability to resolve. 
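RemoteClusterResolver, defined below, keeps track of the configured remote cluster aliases and splits a transform's source index expressions into a local group plus one group per remote alias, so that checkpoints can be fetched from each cluster separately and merged afterwards. The following standalone sketch shows that kind of grouping under the usual "alias:index" qualification; it is illustrative only and is not the RemoteClusterAware.groupClusterIndices implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public final class IndexGroupingSketch {
    // Unqualified names go to the local group (keyed by ""); "alias:index" goes to its alias,
    // but only when the alias is a known remote cluster.
    static Map<String, List<String>> group(Set<String> knownClusters, String... indices) {
        Map<String, List<String>> byCluster = new TreeMap<>();
        for (String index : indices) {
            int sep = index.indexOf(':');
            String cluster = "";
            String name = index;
            if (sep > 0 && knownClusters.contains(index.substring(0, sep))) {
                cluster = index.substring(0, sep);
                name = index.substring(sep + 1);
            }
            byCluster.computeIfAbsent(cluster, k -> new ArrayList<>()).add(name);
        }
        return byCluster;
    }

    public static void main(String[] args) {
        System.out.println(group(Set.of("cluster_b"), "local-index", "cluster_b:remote-index"));
        // prints {=[local-index], cluster_b=[remote-index]}
    }
}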
+ */ +class RemoteClusterResolver extends RemoteClusterAware { + + private final CopyOnWriteArraySet clusters; + + class ResolvedIndices { + private final Map> remoteIndicesPerClusterAlias; + private final List localIndices; + + ResolvedIndices(Map> remoteIndicesPerClusterAlias, List localIndices) { + this.localIndices = localIndices; + this.remoteIndicesPerClusterAlias = remoteIndicesPerClusterAlias; + } + + public Map> getRemoteIndicesPerClusterAlias() { + return remoteIndicesPerClusterAlias; + } + + public List getLocalIndices() { + return localIndices; + } + + public int numClusters() { + return remoteIndicesPerClusterAlias.size() + (localIndices.isEmpty() ? 0 : 1); + } + } + + RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings) { + super(settings); + clusters = new CopyOnWriteArraySet<>(getEnabledRemoteClusters(settings)); + listenForUpdates(clusterSettings); + } + + @Override + protected void updateRemoteCluster(String clusterAlias, Settings settings) { + if (RemoteConnectionStrategy.isConnectionEnabled(clusterAlias, settings)) { + clusters.add(clusterAlias); + } else { + clusters.remove(clusterAlias); + } + } + + ResolvedIndices resolve(String... indices) { + Map> resolvedClusterIndices = groupClusterIndices(clusters, indices); + List localIndices = resolvedClusterIndices.getOrDefault(LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList()); + resolvedClusterIndices.remove(LOCAL_CLUSTER_GROUP_KEY); + return new ResolvedIndices(resolvedClusterIndices, localIndices); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java index 96f2ff181cf83..552a1e5e5c814 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java @@ -32,11 +32,12 @@ public class TimeBasedCheckpointProvider extends DefaultCheckpointProvider { TimeBasedCheckpointProvider( final Client client, + final RemoteClusterResolver remoteClusterResolver, final TransformConfigManager transformConfigManager, final TransformAuditor transformAuditor, final TransformConfig transformConfig ) { - super(client, transformConfigManager, transformAuditor, transformConfig); + super(client, remoteClusterResolver, transformConfigManager, transformAuditor, transformConfig); timeSyncConfig = (TimeSyncConfig) transformConfig.getSyncConfig(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java index 64faf41462589..492073b111c21 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import 
org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo.TransformCheckpointingInfoBuilder; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; @@ -33,23 +35,33 @@ public class TransformCheckpointService { private final Client client; private final TransformConfigManager transformConfigManager; private final TransformAuditor transformAuditor; + private final RemoteClusterResolver remoteClusterResolver; public TransformCheckpointService( final Client client, + final Settings settings, + final ClusterService clusterService, final TransformConfigManager transformConfigManager, TransformAuditor transformAuditor ) { this.client = client; this.transformConfigManager = transformConfigManager; this.transformAuditor = transformAuditor; + this.remoteClusterResolver = new RemoteClusterResolver(settings, clusterService.getClusterSettings()); } public CheckpointProvider getCheckpointProvider(final TransformConfig transformConfig) { if (transformConfig.getSyncConfig() instanceof TimeSyncConfig) { - return new TimeBasedCheckpointProvider(client, transformConfigManager, transformAuditor, transformConfig); + return new TimeBasedCheckpointProvider( + client, + remoteClusterResolver, + transformConfigManager, + transformAuditor, + transformConfig + ); } - return new DefaultCheckpointProvider(client, transformConfigManager, transformAuditor, transformConfig); + return new DefaultCheckpointProvider(client, remoteClusterResolver, transformConfigManager, transformAuditor, transformConfig); } /** @@ -82,5 +94,4 @@ public void getCheckpointingInfo( listener.onFailure(new CheckpointException("Failed to retrieve configuration", transformError)); })); } - } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java index 02fa4765ff637..b7eca07108518 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java @@ -11,6 +11,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Client; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -48,6 +50,7 @@ public void testReportSourceIndexChangesRunsEmpty() throws Exception { DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, + new RemoteClusterResolver(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), transformConfigManager, transformAuditor, transformConfig @@ -92,6 +95,7 @@ public void testReportSourceIndexChangesAddDelete() throws Exception { DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, + new RemoteClusterResolver(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), transformConfigManager, transformAuditor, transformConfig @@ -151,6 +155,7 @@ public void testReportSourceIndexChangesAddDeleteMany() throws Exception { DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, + new RemoteClusterResolver(Settings.EMPTY, new 
ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), transformConfigManager, transformAuditor, transformConfig diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java index 7af774f4a6faf..1552178b7dc39 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java @@ -19,7 +19,10 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; @@ -138,7 +141,13 @@ public void createComponents() { // use a mock for the checkpoint service TransformAuditor mockAuditor = mock(TransformAuditor.class); - transformCheckpointService = new TransformCheckpointService(mockClientForCheckpointing, transformsConfigManager, mockAuditor); + transformCheckpointService = new TransformCheckpointService( + mockClientForCheckpointing, + Settings.EMPTY, + new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null), + transformsConfigManager, + mockAuditor + ); } @AfterClass diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java index d62a242d8fe16..d4b30eb833051 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java @@ -51,7 +51,7 @@ public void testExtractIndexCheckpoints() { ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, false, false, false); - Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices); + Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices, ""); assertEquals(expectedCheckpoints.size(), checkpoints.size()); assertEquals(expectedCheckpoints.keySet(), checkpoints.keySet()); @@ -68,7 +68,7 @@ public void testExtractIndexCheckpointsMissingSeqNoStats() { ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, false, false, true); - Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices); + Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices, ""); assertEquals(expectedCheckpoints.size(), checkpoints.size()); assertEquals(expectedCheckpoints.keySet(), checkpoints.keySet()); @@ -85,7 +85,7 @@ public void testExtractIndexCheckpointsLostPrimaries() { ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, 
true, false, false); - Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices); + Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices, ""); assertEquals(expectedCheckpoints.size(), checkpoints.size()); assertEquals(expectedCheckpoints.keySet(), checkpoints.keySet()); @@ -102,7 +102,7 @@ public void testExtractIndexCheckpointsInconsistentGlobalCheckpoints() { ShardStats[] shardStatsArray = createRandomShardStats(expectedCheckpoints, indices, randomBoolean(), true, false); - Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices); + Map checkpoints = DefaultCheckpointProvider.extractIndexCheckPoints(shardStatsArray, indices, ""); assertEquals(expectedCheckpoints.size(), checkpoints.size()); assertEquals(expectedCheckpoints.keySet(), checkpoints.keySet()); @@ -142,8 +142,13 @@ private static Set randomUserIndices() { * @param missingSeqNoStats whether some indices miss SeqNoStats * @return array of ShardStats */ - private static ShardStats[] createRandomShardStats(Map expectedCheckpoints, Set userIndices, - boolean skipPrimaries, boolean inconsistentGlobalCheckpoints, boolean missingSeqNoStats) { + private static ShardStats[] createRandomShardStats( + Map expectedCheckpoints, + Set userIndices, + boolean skipPrimaries, + boolean inconsistentGlobalCheckpoints, + boolean missingSeqNoStats + ) { // always create the full list List indices = new ArrayList<>(); @@ -192,15 +197,17 @@ private static ShardStats[] createRandomShardStats(Map expectedC checkpoints.add(globalCheckpoint); } - for (int replica = 0; replica < numShardCopies; replica++) { + for (int replica = 0; replica < numShardCopies; replica++) { ShardId shardId = new ShardId(index, shardIndex); boolean primary = (replica == primaryShard); Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardIndex)); - ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, primary, + ShardRouting shardRouting = ShardRouting.newUnassigned( + shardId, + primary, primary ? 
RecoverySource.EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null) - ); + ); shardRouting = shardRouting.initialize("node-0", null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); shardRouting = shardRouting.moveToStarted(); @@ -222,15 +229,18 @@ private static ShardStats[] createRandomShardStats(Map expectedC if (inconsistentReplica == replica) { // overwrite - SeqNoStats invalidSeqNoStats = - new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint - randomLongBetween(10L, 100L)); + SeqNoStats invalidSeqNoStats = new SeqNoStats( + maxSeqNo, + localCheckpoint, + globalCheckpoint - randomLongBetween(10L, 100L) + ); shardStats.add( - new ShardStats(shardRouting, - new ShardPath(false, path, path, shardId), stats, null, invalidSeqNoStats, null)); + new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, invalidSeqNoStats, null) + ); } else { shardStats.add( - new ShardStats(shardRouting, - new ShardPath(false, path, path, shardId), stats, null, validSeqNoStats, null)); + new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, validSeqNoStats, null) + ); } } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index 8947559c849c5..200e7d5f2527f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -126,6 +126,8 @@ public void testNodeVersionAssignment() { IndexBasedTransformConfigManager transformsConfigManager = new IndexBasedTransformConfigManager(client, xContentRegistry()); TransformCheckpointService transformCheckpointService = new TransformCheckpointService( client, + Settings.EMPTY, + new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null), transformsConfigManager, mockAuditor ); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index 4b1c2cb2f9b12..a4eff60768773 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -10,6 +10,9 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; @@ -69,8 +72,13 @@ public void testStopOnFailedTaskWithStoppedIndexer() { TransformConfig transformConfig = TransformConfigTests.randomDataFrameTransformConfigWithoutHeaders(); TransformAuditor auditor = new MockTransformAuditor(); TransformConfigManager transformsConfigManager = new InMemoryTransformConfigManager(); - - TransformCheckpointService 
transformsCheckpointService = new TransformCheckpointService(client, transformsConfigManager, auditor); + TransformCheckpointService transformsCheckpointService = new TransformCheckpointService( + client, + Settings.EMPTY, + new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null), + transformsConfigManager, + auditor + ); TransformState transformState = new TransformState( TransformTaskState.FAILED, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index 9464abcc4ba59..2e496c2ec619c 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -56,7 +56,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { int count = countOption.value(options); List args = arguments.values(options); if (args.size() != 1) { - throw new UserException(ExitCodes.USAGE, "expecting a single argument that is the cron expression to evaluate"); + throw new UserException(ExitCodes.USAGE, "expecting a single argument that is the cron expression to evaluate, got " + args); } boolean printDetail = options.has(detailOption); execute(terminal, args.get(0), count, printDetail); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java index ecc071d598105..2ebaccc61801c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java @@ -196,7 +196,7 @@ public void testParserInvalidUnexpectedField() throws Exception { TextTemplate.parse(parser); fail("expected parse exception when encountering an unknown field"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("[script] unknown field [unknown_field], parser not found")); + assertThat(e.getMessage(), containsString("[script] unknown field [unknown_field]")); } } @@ -210,7 +210,7 @@ public void testParserInvalidUnknownScriptType() throws Exception { XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); XContentParseException ex = expectThrows(XContentParseException.class, () -> TextTemplate.parse(parser)); - assertEquals("[1:2] [script] unknown field [template], parser not found", ex.getMessage()); + assertEquals("[1:2] [script] unknown field [template]", ex.getMessage()); } public void testParserInvalidMissingText() throws Exception { @@ -222,7 +222,7 @@ public void testParserInvalidMissingText() throws Exception { XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); XContentParseException ex = expectThrows(XContentParseException.class, () -> TextTemplate.parse(parser)); - assertEquals("[1:2] [script] unknown field [type], parser not found", ex.getMessage()); + assertEquals("[1:2] [script] unknown field [type]", ex.getMessage()); } public void testNullObject() throws Exception { diff --git a/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java 
b/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java index 67643af723894..4b0e3afc813ad 100644 --- a/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java +++ b/x-pack/qa/oidc-op-tests/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthIT.java @@ -276,7 +276,7 @@ private void verifyElasticsearchAccessTokenForCodeFlow(String accessToken) throw final Map map = callAuthenticateApiUsingAccessToken(accessToken); logger.info("Authentication with token Response: " + map); assertThat(map.get("username"), equalTo("alice")); - assertThat((List) map.get("roles"), containsInAnyOrder("kibana_user", "auditor")); + assertThat((List) map.get("roles"), containsInAnyOrder("kibana_admin", "auditor")); assertThat(map.get("metadata"), instanceOf(Map.class)); final Map metadata = (Map) map.get("metadata"); @@ -374,7 +374,7 @@ private void setFacilitatorUser() throws IOException { private void setRoleMappings() throws IOException { Request createRoleMappingRequest = new Request("PUT", "/_security/role_mapping/oidc_kibana"); - createRoleMappingRequest.setJsonEntity("{ \"roles\" : [\"kibana_user\"]," + + createRoleMappingRequest.setJsonEntity("{ \"roles\" : [\"kibana_admin\"]," + "\"enabled\": true," + "\"rules\": {" + "\"field\": { \"realm.name\": \"" + REALM_NAME + "\"}" + diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 527f594ce4174..dc7856a38e26d 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -161,13 +161,6 @@ for (Version bwcVersion : bwcVersions.wireCompatible) { nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') - // disabled temporarily for backporting serialization change - if (bwcVersion.before('8.0.0')) { - systemProperty 'tests.rest.blacklist', [ - 'upgraded_cluster/80_transform_jobs_crud/Get start, stop mixed cluster batch transform', - 'upgraded_cluster/80_transform_jobs_crud/Test GET, mixed continuous transforms', - ].join(',') - } } tasks.register("${baseName}#bwcTest") { diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_transform_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_transform_jobs_crud.yml index 1817984be7cd5..57d16baf58dad 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_transform_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_transform_jobs_crud.yml @@ -1,9 +1,5 @@ --- "Test put batch transform on mixed cluster": - - skip: - version: " - 7.99.99" # after BWC merged then remove entirely - reason: disabled temporarily for backporting serialization change - - do: cluster.health: index: "transform-airline-data" @@ -110,10 +106,6 @@ --- "Test put continuous transform on mixed cluster": - - skip: - version: " - 7.99.99" # after BWC merged then remove entirely - reason: disabled temporarily for backporting serialization change - - do: cluster.health: index: "transform-airline-data-cont" diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 96f189ffc3ac0..26fd474da9020 100644 --- 
a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -28,6 +28,7 @@ testClusters.integTest { setting 'xpack.security.authc.realms.custom.custom.filtered_setting', 'should be filtered' setting 'xpack.security.authc.realms.file.esusers.order', '1' setting 'xpack.security.authc.realms.native.native.order', '2' + setting 'xpack.security.authc.realms.custom_role_mapping.role_map.order', '3' setting 'xpack.security.enabled', 'true' setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java index 3f85d8086d678..764f044a01154 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java @@ -6,14 +6,13 @@ package org.elasticsearch.example; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.example.realm.CustomAuthenticationFailureHandler; import org.elasticsearch.example.realm.CustomRealm; +import org.elasticsearch.example.realm.CustomRoleMappingRealm; import org.elasticsearch.example.role.CustomInMemoryRolesProvider; -import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import java.security.AccessController; @@ -43,19 +42,22 @@ public class ExampleSecurityExtension implements SecurityExtension { } @Override - public Map getRealms(ResourceWatcherService resourceWatcherService) { - return Collections.singletonMap(CustomRealm.TYPE, CustomRealm::new); + public Map getRealms(SecurityComponents components) { + return Map.ofEntries( + Map.entry(CustomRealm.TYPE, CustomRealm::new), + Map.entry(CustomRoleMappingRealm.TYPE, + config -> new CustomRoleMappingRealm(config, components.roleMapper())) + ); } @Override - public AuthenticationFailureHandler getAuthenticationFailureHandler() { + public AuthenticationFailureHandler getAuthenticationFailureHandler(SecurityComponents components) { return new CustomAuthenticationFailureHandler(); } - @Override public List, ActionListener>> - getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) { + getRolesProviders(SecurityComponents components) { CustomInMemoryRolesProvider rp1 = new CustomInMemoryRolesProvider(Collections.singletonMap(ROLE_A, "read")); Map roles = new HashMap<>(); roles.put(ROLE_A, "all"); diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java index eedb06f2c1bad..ee47fa0bf7b5b 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java @@ -7,6 +7,7 @@ import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.example.realm.CustomRealm; +import org.elasticsearch.example.realm.CustomRoleMappingRealm; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestHeaderDefinition; @@ -33,6 +34,7 @@ public Collection getRestHeaders() { public List> getSettings() { List> list = new ArrayList<>(RealmSettings.getStandardSettings(CustomRealm.TYPE)); list.add(RealmSettings.simpleString(CustomRealm.TYPE, "filtered_setting", Setting.Property.NodeScope, Setting.Property.Filtered)); + list.addAll(RealmSettings.getStandardSettings(CustomRoleMappingRealm.TYPE)); return list; } } diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRoleMappingRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRoleMappingRealm.java new file mode 100644 index 0000000000000..2b277d39b1364 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRoleMappingRealm.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.List; +import java.util.Map; + +/** + * An example realm with specific behaviours: + * (1) It only supports lookup (that is, "run-as" and "authorization_realms") but not authentication + * (2) It performs role mapping to determine the roles for the looked-up user + * (3) It caches the looked-up User objects + */ +public class CustomRoleMappingRealm extends Realm implements CachingRealm { + + public static final String TYPE = "custom_role_mapping"; + + static final String USERNAME = "role_mapped_user"; + static final String USER_GROUP = "user_group"; + + private final Cache cache; + private final UserRoleMapper roleMapper; + + public CustomRoleMappingRealm(RealmConfig config, UserRoleMapper roleMapper) { + super(config); + this.cache = CacheBuilder.builder().build(); + this.roleMapper = roleMapper; + this.roleMapper.refreshRealmOnChange(this); + } + + @Override + public boolean supports(AuthenticationToken token) { + return false; + } + + @Override + public UsernamePasswordToken token(ThreadContext threadContext) { + return null; + } + + @Override + public void authenticate(AuthenticationToken authToken, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + public void lookupUser(String username, ActionListener listener) { + final User user = 
cache.get(username); + if (user != null) { + listener.onResponse(user); + return; + } + if (USERNAME.equals(username)) { + buildUser(username, ActionListener.wrap( + u -> listener.onResponse(cache.computeIfAbsent(username, k -> u)), + listener::onFailure + )); + } else { + listener.onResponse(null); + } + } + + private void buildUser(String username, ActionListener listener) { + final UserRoleMapper.UserData data = new UserRoleMapper.UserData(username, null, List.of(USER_GROUP), Map.of(), super.config); + roleMapper.resolveRoles(data, ActionListener.wrap( + roles -> listener.onResponse(new User(username, roles.toArray(String[]::new))), + listener::onFailure + )); + } + + @Override + public void expire(String username) { + this.cache.invalidate(username); + } + + @Override + public void expireAll() { + this.cache.invalidateAll(); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmIT.java new file mode 100644 index 0000000000000..4eee89d0d6ce5 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmIT.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +/** + * Integration test to test authentication with the custom role-mapping realm + */ +public class CustomRoleMappingRealmIT extends ESRestTestCase { + + private String expectedRole; + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) + .put(ThreadContext.PREFIX + "." 
+ CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .build(); + } + + @Before + public void setupRoleMapping() throws Exception { + expectedRole = randomAlphaOfLengthBetween(4, 16); + Request request = new Request("PUT", "/_security/role_mapping/test"); + request.setJsonEntity("{" + + "\"enabled\": true," + + "\"roles\":[\"" + + expectedRole + + "\"]," + + "\"rules\":{\"field\":{\"groups\":\"" + CustomRoleMappingRealm.USER_GROUP + "\"} }" + + "}"); + adminClient().performRequest(request); + } + + public void testUserWithRoleMapping() throws Exception { + Request request = new Request("GET", "/_security/_authenticate"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + // Authenticate as the custom realm superuser + options.addHeader(CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER); + options.addHeader(CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()); + // But "run-as" the role mapped user + options.addHeader("es-security-runas-user", CustomRoleMappingRealm.USERNAME); + request.setOptions(options); + + final Response response = client().performRequest(request); + final Map authenticate = entityAsMap(response); + assertThat(authenticate.get("username"), is(CustomRoleMappingRealm.USERNAME)); + assertThat(authenticate.get("roles"), instanceOf(List.class)); + assertThat(authenticate.get("roles"), equalTo(List.of(expectedRole))); + } + +} diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmTests.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmTests.java new file mode 100644 index 0000000000000..4057f2636d08f --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRoleMappingRealmTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
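A hedged sketch of the caching contract that the unit test further below exercises: repeated lookups of the known user are served from the realm's cache until an entry is expired, after which the user is rebuilt through the role mapper. `realm` is assumed to be a `CustomRoleMappingRealm` wired to a role mapper exactly as in that test; the expire-and-rebuild part is an extrapolation from the realm code above, not something the test itself asserts.

// Hedged sketch, inside a test method; realm is assumed to be constructed as in
// CustomRoleMappingRealmTests below.
PlainActionFuture<User> first = new PlainActionFuture<>();
realm.lookupUser(CustomRoleMappingRealm.USERNAME, first);

PlainActionFuture<User> second = new PlainActionFuture<>();
realm.lookupUser(CustomRoleMappingRealm.USERNAME, second);
assert second.get() == first.get();  // cache hit, the role mapper is not consulted again

realm.expire(CustomRoleMappingRealm.USERNAME);  // or realm.expireAll()

PlainActionFuture<User> third = new PlainActionFuture<>();
realm.lookupUser(CustomRoleMappingRealm.USERNAME, third);
assert third.get() != first.get();  // entry invalidated, the user is rebuilt and re-cached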
+ */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class CustomRoleMappingRealmTests extends ESTestCase { + + public void testCachingOfUserLookup() throws Exception { + final Environment env = super.newEnvironment(); + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final RealmConfig realmConfig = new RealmConfig( + new RealmConfig.RealmIdentifier(CustomRoleMappingRealm.TYPE, "test"), + env.settings(), env, new ThreadContext(env.settings()) + ); + CustomRoleMappingRealm realm = new CustomRoleMappingRealm(realmConfig, roleMapper); + + final AtomicInteger roleMappingCounter = new AtomicInteger(0); + mockRoleMapping(roleMapper, () -> { + roleMappingCounter.incrementAndGet(); + return Set.of("role1", "role2"); + }); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser(CustomRoleMappingRealm.USERNAME, future); + final User user1 = future.get(); + assertThat(user1.principal(), is(CustomRoleMappingRealm.USERNAME)); + assertThat(user1.roles(), arrayContainingInAnyOrder("role1", "role2")); + assertThat(roleMappingCounter.get(), is(1)); + + future = new PlainActionFuture<>(); + realm.lookupUser(CustomRoleMappingRealm.USERNAME, future); + final User user2 = future.get(); + assertThat(user2, sameInstance(user1)); + assertThat(roleMappingCounter.get(), is(1)); + } + + @SuppressWarnings("unchecked") + private void mockRoleMapping(UserRoleMapper roleMapper, Supplier> supplier) { + doAnswer(inv -> { + ActionListener> listener = (ActionListener>) inv.getArguments()[1]; + listener.onResponse(supplier.get()); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + } + +} diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 7675642860b7c..bcc8aa3acdb07 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -305,39 +305,76 @@ private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exception { final AtomicReference objectPathReference = new AtomicReference<>(); - assertBusy(() -> { - client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); - - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - 
builder.startObject("query").startObject("bool").startArray("must"); - builder.startObject().startObject("term").startObject("watch_id").field("value", watchId).endObject().endObject() + try { + assertBusy(() -> { + client().performRequest(new Request("POST", "/.watcher-history-*/_refresh")); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("query").startObject("bool").startArray("must"); + builder.startObject().startObject("term").startObject("watch_id").field("value", watchId).endObject().endObject() .endObject(); - if (Strings.isNullOrEmpty(state) == false) { - builder.startObject().startObject("term").startObject("state").field("value", state).endObject().endObject() + if (Strings.isNullOrEmpty(state) == false) { + builder.startObject().startObject("term").startObject("state").field("value", state).endObject().endObject() .endObject(); - } - builder.endArray().endObject().endObject(); - builder.startArray("sort").startObject().startObject("trigger_event.triggered_time").field("order", "desc").endObject() + } + builder.endArray().endObject().endObject(); + builder.startArray("sort").startObject().startObject("trigger_event.triggered_time").field("order", "desc").endObject() .endObject().endArray(); - builder.endObject(); - - Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); - searchRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - searchRequest.setJsonEntity(Strings.toString(builder)); - Response response = client().performRequest(searchRequest); - ObjectPath objectPath = ObjectPath.createFromResponse(response); - int totalHits = objectPath.evaluate("hits.total"); - assertThat(totalHits, is(greaterThanOrEqualTo(1))); - String watchid = objectPath.evaluate("hits.hits.0._source.watch_id"); - assertThat(watchid, is(watchId)); - objectPathReference.set(objectPath); - } catch (ResponseException e) { - final String err = "Failed to perform search of watcher history"; - logger.info(err, e); - fail(err); + builder.endObject(); + + Request searchRequest = new Request("POST", "/.watcher-history-*/_search"); + searchRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); + searchRequest.setJsonEntity(Strings.toString(builder)); + Response response = client().performRequest(searchRequest); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + int totalHits = objectPath.evaluate("hits.total"); + assertThat(totalHits, is(greaterThanOrEqualTo(1))); + String watchid = objectPath.evaluate("hits.hits.0._source.watch_id"); + assertThat(watchid, is(watchId)); + objectPathReference.set(objectPath); + } catch (ResponseException e) { + final String err = "Failed to perform search of watcher history"; + logger.info(err, e); + fail(err); + } + }); + } catch (AssertionError ae) { + { + Request request = new Request("GET", "/_watcher/stats"); + request.addParameter("metric", "_all"); + request.addParameter("pretty", "true"); + try { + Response response = client().performRequest(request); + logger.info("watcher_stats: {}", EntityUtils.toString(response.getEntity())); + } catch (IOException e) { + logger.error("error while fetching watcher_stats", e); + } } - }); + { + Request request = new Request("GET", "/_cluster/state"); + request.addParameter("pretty", "true"); + try { + Response response = client().performRequest(request); + logger.info("cluster_state: {}", EntityUtils.toString(response.getEntity())); + } catch (IOException e) { + logger.error("error while fetching cluster_state", e); + } + } + { + Request request 
= new Request("GET", "/.watcher-history-*/_search"); + request.addParameter("size", "100"); + request.addParameter("sort", "trigger_event.triggered_time:desc"); + request.addParameter("pretty", "true"); + try { + Response response = client().performRequest(request); + logger.info("watcher_history_snippets: {}", EntityUtils.toString(response.getEntity())); + } catch (IOException e) { + logger.error("error while fetching watcher_history_snippets", e); + } + } + throw ae; + } return objectPathReference.get(); } } diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 2b509a348e901..c7fcf719d5594 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -109,7 +109,6 @@ protected Settings restAdminSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32299") public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; @@ -125,7 +124,7 @@ public void testMonitorClusterHealth() throws Exception { assertThat(address, is(notNullValue())); String[] splitAddress = address.split(":", 2); String host = splitAddress[0]; - int port = Integer.valueOf(splitAddress[1]); + int port = Integer.parseInt(splitAddress[1]); // put watch try (XContentBuilder builder = jsonBuilder()) { @@ -161,7 +160,12 @@ public void testMonitorClusterHealth() throws Exception { assertThat(conditionMet, is(true)); deleteWatch(watchId); - assertWatchCount(0); + // Wrap inside an assertBusy(...), because watch may execute just after being deleted, + // This tries to re-add the watch which fails, because of version conflict, + // but for a moment the watch count from watcher stats api may be incorrect. + // (via WatcherIndexingListener#preIndex) + // The WatcherIndexingListener#postIndex() detects this version conflict and corrects the watch count. + assertBusy(() -> assertWatchCount(0)); } private void indexWatch(String watchId, XContentBuilder builder) throws Exception {