From 0906c33bf222d9bb9c12969466ecff07e506fbc8 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Thu, 10 Dec 2020 12:25:47 +0000 Subject: [PATCH] Make it possible to use Stack logging in Docker Backport of #65778. Closes #62758. Include the Stack log4j config in the Docker image, in order to make it possible to write logs in a container environment in the same way as for an archive or package deployment. This is useful in situations where the user is bind-mounting the logs directory and has their own arrangements for log shipping. To use stack logging, set the environment variable `ES_LOG_STYLE` to `file`. It can also be set to `console`, which is the same as not specifying it at all. The Docker logging config is now auto-generated at image build time, by running the default config through a transformer program when preparing the distribution in an image builder step. Also, in the docker distribution `build.gradle`, I changed a helper closure into a class with a static method in order to fix an issue where the Docker image was always being rebuilt, even when there were no changes. --- .../gradle/docker/ShellRetry.java | 45 ++++++ distribution/docker/build.gradle | 63 ++------ distribution/docker/src/docker/Dockerfile | 23 +-- .../src/docker/bin/docker-entrypoint.sh | 17 +++ .../src/docker/config/log4j2.properties | 131 ----------------- .../transform-log4j-config/build.gradle | 29 ++++ .../transform/log4j/TransformLog4jConfig.java | 126 ++++++++++++++++ .../log4j/TransformLog4jConfigTests.java | 135 ++++++++++++++++++ docs/reference/setup/install/docker.asciidoc | 4 +- .../packaging/test/DockerTests.java | 44 ++++++ .../elasticsearch/packaging/util/Docker.java | 2 +- settings.gradle | 1 + 12 files changed, 424 insertions(+), 196 deletions(-) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/docker/ShellRetry.java mode change 100644 => 100755 distribution/docker/src/docker/bin/docker-entrypoint.sh delete mode 100644 distribution/docker/src/docker/config/log4j2.properties create mode 100644 distribution/docker/transform-log4j-config/build.gradle create mode 100644 distribution/docker/transform-log4j-config/src/main/java/org/elasticsearch/transform/log4j/TransformLog4jConfig.java create mode 100644 distribution/docker/transform-log4j-config/src/test/java/org/elasticsearch/transform/log4j/TransformLog4jConfigTests.java diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/docker/ShellRetry.java b/buildSrc/src/main/java/org/elasticsearch/gradle/docker/ShellRetry.java new file mode 100644 index 000000000000..fffa62f73110 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/docker/ShellRetry.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.docker; + +/** + * The methods in this class take a shell command and wrap it in retry logic, so that our + * Docker builds can be more robust in the face of transient errors e.g. network issues. + */ +public class ShellRetry { + static String loop(String name, String command) { + return loop(name, command, 4, "exit"); + } + + static String loop(String name, String command, int indentSize, String exitKeyword) { + String indent = " ".repeat(indentSize); + + StringBuilder commandWithRetry = new StringBuilder("for iter in {1..10}; do \n"); + commandWithRetry.append(indent).append(" ").append(command).append(" && \n"); + commandWithRetry.append(indent).append(" exit_code=0 && break || \n"); + commandWithRetry.append(indent); + commandWithRetry.append(" exit_code=$? && echo \"").append(name).append(" error: retry $iter in 10s\" && sleep 10; \n"); + commandWithRetry.append(indent).append("done; \n"); + commandWithRetry.append(indent).append(exitKeyword).append(" $exit_code"); + + // We need to escape all newlines so that the build process doesn't run all lines onto a single line + return commandWithRetry.toString().replaceAll(" *\n", " \\\\\n"); + } +} diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index ebd4aee07e7c..dd5de465819c 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -4,6 +4,7 @@ import org.elasticsearch.gradle.ElasticsearchDistribution.Flavor import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.docker.DockerBuildTask +import org.elasticsearch.gradle.docker.ShellRetry import org.elasticsearch.gradle.info.BuildParams import org.elasticsearch.gradle.testfixtures.TestFixturesPlugin @@ -21,6 +22,7 @@ configurations { dockerSource aarch64OssDockerSource ossDockerSource + transformLog4jJar } dependencies { @@ -28,6 +30,7 @@ dependencies { dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default") aarch64OssDockerSource project(path: ":distribution:archives:oss-linux-aarch64-tar", configuration:"default") ossDockerSource project(path: ":distribution:archives:oss-linux-tar", configuration:"default") + transformLog4jJar project(path: ":distribution:docker:transform-log4j-config", configuration: "default") } ext.expansions = { Architecture architecture, boolean oss, DockerBase base, boolean local -> @@ -67,7 +70,7 @@ ARG BASE_TAG=8.2 sourceElasticsearch = "COPY $elasticsearch /opt/elasticsearch.tar.gz" } else { sourceElasticsearch = """ -RUN curl --retry 8 -S -L \\ +RUN curl --retry 10 -S -L \\ --output /opt/elasticsearch.tar.gz \\ https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/$elasticsearch """ @@ -75,18 +78,6 @@ RUN curl --retry 8 -S -L \\ def (major,minor) = VersionProperties.elasticsearch.split("\\.") - def retry_loop = { name, command, indentSize = 4, exitKeyword = 'exit' -> - String indent = ' ' * indentSize - String commandWithRetry = """for iter in {1..10}; do -${indent} ${command} && -${indent} exit_code=0 && break || -${indent} exit_code=\$? && echo "${name} error: retry \$iter in 10s" && sleep 10; -${indent}done; -${indent}${exitKeyword} \$exit_code""" - - return commandWithRetry.replaceAll(" *\n", " \\\\\n") - } - return [ 'base_image' : base.getImage(), 'bin_dir' : base == DockerBase.IRON_BANK ? 
'scripts' : 'bin', @@ -100,7 +91,7 @@ ${indent}${exitKeyword} \$exit_code""" 'docker_base' : base.name().toLowerCase(), 'version' : VersionProperties.elasticsearch, 'major_minor_version' : "${major}.${minor}", - 'retry_loop' : retry_loop + 'retry' : ShellRetry ] } @@ -196,6 +187,10 @@ void addCopyDockerContextTask(Architecture architecture, boolean oss, DockerBase with dockerBuildContext(architecture, oss, base, true) + into(base == DockerBase.IRON_BANK ? 'scripts' : 'bin') { + from configurations.transformLog4jJar + } + if (architecture == Architecture.AARCH64) { if (oss) { from configurations.aarch64OssDockerSource @@ -230,46 +225,6 @@ tasks.register("copyKeystore", Sync) { } } -tasks.register("checkSecurityAuditLayoutPatternIdentical") { - // the two log4j2.properties files containing security audit configuration for archive and docker builds respectively - def originalLog4j = project(":x-pack:plugin:core").file('src/main/config/log4j2.properties') - def dockerLog4j = project.file("src/docker/config/log4j2.properties") - inputs.files(originalLog4j, dockerLog4j) - def patternPropertyKey = "appender.audit_rolling.layout.pattern" - doLast { - def coreLog4jProperties = new Properties() - originalLog4j.withInputStream { input -> - coreLog4jProperties.load(input) - } - - if (false == coreLog4jProperties.containsKey(patternPropertyKey)) { - throw new GradleException("The [${originalLog4j.getPath()}] file changed such that the layout pattern is not " + - "referred to by the property named [${patternPropertyKey}]. Please update the task [${name}] " + - "definition from project [${path}] to reflect the new name for the layout pattern property.") - } - - def dockerLog4jProperties = new Properties() - dockerLog4j.withInputStream { input -> - dockerLog4jProperties.load(input) - } - - if (false == dockerLog4jProperties.containsKey(patternPropertyKey)) { - throw new GradleException("The [${dockerLog4j.getPath()}] file changed such that the layout pattern is not " + - "referred to by the property named [${patternPropertyKey}]. 
Please update the task [${name}] " + - "definition from project [${path}] to reflect the new name for the layout pattern property.") - } - - if (false == coreLog4jProperties.getProperty(patternPropertyKey).equals(dockerLog4jProperties.getProperty(patternPropertyKey))) { - throw new GradleException("The property value for the layout pattern [${patternPropertyKey}] is NOT identical " + - "between the [${originalLog4j.getPath()}] and the [${dockerLog4j.getPath()}] files.") - } - } -} - -tasks.named("precommit").configure { - dependsOn 'checkSecurityAuditLayoutPatternIdentical' -} - elasticsearch_distributions { Architecture.values().each { eachArchitecture -> Flavor.values().each { distroFlavor -> diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index a093976acc97..472a92a30326 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -29,7 +29,7 @@ FROM ${base_image} AS builder <% if (docker_base == 'ubi') { %> # Install required packages to extract the Elasticsearch distribution -RUN <%= retry_loop(package_manager, "${package_manager} install -y findutils tar gzip") %> +RUN <%= retry.loop(package_manager, "${package_manager} install -y findutils tar gzip") %> <% } %> <% if (docker_base == 'iron_bank') { %> @@ -69,21 +69,26 @@ ${source_elasticsearch} RUN tar -zxf /opt/elasticsearch.tar.gz --strip-components=1 # The distribution includes a `config` directory, no need to create it -COPY ${config_dir}/elasticsearch.yml ${config_dir}/log4j2.properties config/ +COPY ${config_dir}/elasticsearch.yml config/ +COPY ${bin_dir}/transform-log4j-config-${version}.jar /tmp/ # 1. Configure the distribution for Docker # 2. Ensure directories are created. Most already are, but make sure # 3. Apply correct permissions -# 4. Apply more correct permissions -# 5. The JDK's directories' permissions don't allow `java` to be executed under a different +# 4. Move the distribution's default logging config aside +# 5. Generate a docker logging config, to be used by default +# 6. Apply more correct permissions +# 7. The JDK's directories' permissions don't allow `java` to be executed under a different # group to the default. Fix this. -# 6. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. -# 7. Ensure all files are world-readable by default. It should be possible to +# 8. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. +# 9. Ensure all files are world-readable by default. It should be possible to # examine the contents of the image under any UID:GID -RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /usr/share/elasticsearch/bin/elasticsearch-env && \\ +RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \\ mkdir -p config/jvm.options.d data logs plugins && \\ chmod 0775 config config/jvm.options.d data logs plugins && \\ - chmod 0660 config/elasticsearch.yml config/log4j2.properties && \\ + mv config/log4j2.properties config/log4j2.file.properties && \\ + jdk/bin/java -jar /tmp/transform-log4j-config-${version}.jar config/log4j2.file.properties > config/log4j2.properties && \\ + chmod 0660 config/elasticsearch.yml config/log4j2*.properties && \\ find ./jdk -type d -exec chmod 0755 {} + && \\ find . -xdev -perm -4000 -exec chmod ug-s {} + && \\ find . 
-type f -exec chmod o+r {} + @@ -109,7 +114,7 @@ RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\ <% } else { %> -RUN <%= retry_loop( +RUN <%= retry.loop( package_manager, "${package_manager} update --setopt=tsflags=nodocs -y && \n" + " ${package_manager} install --setopt=tsflags=nodocs -y \n" + diff --git a/distribution/docker/src/docker/bin/docker-entrypoint.sh b/distribution/docker/src/docker/bin/docker-entrypoint.sh old mode 100644 new mode 100755 index c12964b13acf..7c0c69d20952 --- a/distribution/docker/src/docker/bin/docker-entrypoint.sh +++ b/distribution/docker/src/docker/bin/docker-entrypoint.sh @@ -80,4 +80,21 @@ if [[ "$(id -u)" == "0" ]]; then fi fi +if [[ -n "$ES_LOG_STYLE" ]]; then + case "$ES_LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config + mv /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties + ;; + *) + echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +# Signal forwarding and child reaping is handled by `tini`, which is the +# actual entrypoint of the container run_as_other_user_if_needed /usr/share/elasticsearch/bin/elasticsearch <<<"$KEYSTORE_PASSWORD" diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties deleted file mode 100644 index e67d963a7231..000000000000 --- a/distribution/docker/src/docker/config/log4j2.properties +++ /dev/null @@ -1,131 +0,0 @@ -status = error - -appender.rolling.type = Console -appender.rolling.name = rolling -appender.rolling.layout.type = ESJsonLayout -appender.rolling.layout.type_name = server - -rootLogger.level = info -rootLogger.appenderRef.rolling.ref = rolling - -appender.deprecation_rolling.type = Console -appender.deprecation_rolling.name = deprecation_rolling -appender.deprecation_rolling.layout.type = ESJsonLayout -appender.deprecation_rolling.layout.type_name = deprecation -appender.deprecation_rolling.layout.esmessagefields=x-opaque-id -appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter - -appender.header_warning.type = HeaderWarningAppender -appender.header_warning.name = header_warning - -logger.deprecation.name = org.elasticsearch.deprecation -logger.deprecation.level = deprecation -logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling -logger.deprecation.appenderRef.header_warning.ref = header_warning -logger.deprecation.additivity = false - -appender.index_search_slowlog_rolling.type = Console -appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling -appender.index_search_slowlog_rolling.layout.type = ESJsonLayout -appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog -appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,stats,search_type,total_shards,source,id - -logger.index_search_slowlog_rolling.name = index.search.slowlog -logger.index_search_slowlog_rolling.level = trace -logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling -logger.index_search_slowlog_rolling.additivity = false - -appender.index_indexing_slowlog_rolling.type = Console -appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling -appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout -appender.index_indexing_slowlog_rolling.layout.type_name = 
index_indexing_slowlog -appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source - -logger.index_indexing_slowlog.name = index.indexing.slowlog.index -logger.index_indexing_slowlog.level = trace -logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling -logger.index_indexing_slowlog.additivity = false - -appender.audit_rolling.type = Console -appender.audit_rolling.name = audit_rolling -appender.audit_rolling.layout.type = PatternLayout -appender.audit_rolling.layout.pattern = {\ -"type":"audit", \ -"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ -%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ -%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ -%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ -%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ -%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ -%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ -%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ -%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ -%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ -%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ -%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ -%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ -%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ -%varsNotEmpty{, "user.roles":%map{user.roles}}\ -%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ -%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ -%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ -%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ -%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ -%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ -%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ -%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ -%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ -%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ -%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ -%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ -%varsNotEmpty{, "indices":%map{indices}}\ -%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ -%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ -%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ -%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ -%varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\ -}%n -# "node.name" node name from the `elasticsearch.yml` settings -# "node.id" node id which should not change between cluster restarts -# "host.name" unresolved hostname of the local node -# "host.ip" the local bound ip (i.e. the ip listening for connections) -# "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) -# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. -# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" -# "user.name" the subject name as authenticated by a realm -# "user.run_by.name" the original authenticated subject name that is impersonating another one. 
-# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. -# "user.realm" the name of the realm that authenticated "user.name" -# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") -# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from -# "user.roles" the roles array of the user; these are the roles that are granting privileges -# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" -# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" -# "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter" -# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node -# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated -# "url.path" the URI component between the port and the query string; it is percent (URL) encoded -# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded -# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT -# "request.body" the content of the request body entity, JSON escaped -# "request.id" a synthentic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request -# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) -# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) -# "indices" the array of indices that the "action" is acting upon -# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header -# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) -# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event -# "rule" name of the applied rule if the "origin.type" is "ip_filter" -# "event.category" fixed value "elasticsearch-audit" - -logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail -logger.xpack_security_audit_logfile.level = info -logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling -logger.xpack_security_audit_logfile.additivity = false - -logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature -logger.xmlsig.level = error -logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter -logger.samlxml_decrypt.level = fatal -logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter -logger.saml2_decrypt.level = fatal diff --git a/distribution/docker/transform-log4j-config/build.gradle b/distribution/docker/transform-log4j-config/build.gradle new file mode 100644 index 000000000000..7f239831e5ed --- /dev/null +++ b/distribution/docker/transform-log4j-config/build.gradle @@ -0,0 +1,29 @@ +apply plugin: 'elasticsearch.build' + +repositories { + jcenter() +} + +dependencies { + testImplementation "junit:junit:${versions.junit}" + testImplementation 
"org.hamcrest:hamcrest:${versions.hamcrest}" +} + +tasks.named('jar').configure { + manifest { + attributes 'Main-Class': 'org.elasticsearch.transform.log4j.TransformLog4jConfig' + } +} + +// This tests depend on ES core +disableTasks('forbiddenApisMain', 'forbiddenApisTest') + +tasks.named('testingConventions').configure { + naming.clear() + naming { + Tests { + baseClass 'junit.framework.TestCase' + } + } +} + diff --git a/distribution/docker/transform-log4j-config/src/main/java/org/elasticsearch/transform/log4j/TransformLog4jConfig.java b/distribution/docker/transform-log4j-config/src/main/java/org/elasticsearch/transform/log4j/TransformLog4jConfig.java new file mode 100644 index 000000000000..8c8972789e41 --- /dev/null +++ b/distribution/docker/transform-log4j-config/src/main/java/org/elasticsearch/transform/log4j/TransformLog4jConfig.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transform.log4j; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +/** + * This class takes in a log4j configuration file, and transform it into a config that + * writes everything to the console. This is useful when running Elasticsearch in a Docker + * container, where the Docker convention is to log to stdout / stderr and let the + * orchestration layer direct the output. + */ +public class TransformLog4jConfig { + + public static void main(String[] args) throws IOException { + List lines = getConfigFile(args); + + final List output = transformConfig(lines); + + output.forEach(System.out::println); + } + + private static List getConfigFile(String[] args) throws IOException { + if (args.length != 1) { + System.err.println("ERROR: Must supply a single argument, the file to process"); + System.exit(1); + } + + Path configPath = Path.of(args[0]); + + if (Files.exists(configPath) == false) { + System.err.println("ERROR: [" + configPath + "] does not exist"); + System.exit(1); + } + + if (Files.isReadable(configPath) == false) { + System.err.println("ERROR: [" + configPath + "] exists but is not readable"); + System.exit(1); + } + + return Files.readAllLines(configPath); + } + + public static List transformConfig(List lines) { + final List output = new ArrayList<>(lines.size()); + + // This flag provides a way to handle properties whose values are split + // over multiple lines and we need to omit those properties. 
+ boolean skipNext = false; + + for (final String line : lines) { + if (skipNext) { + if (line.endsWith("\\") == false) { + skipNext = false; + } + continue; + } + if (line.startsWith("appender.")) { + String[] parts = line.split("\\s*=\\s*"); + String key = parts[0]; + String[] keyParts = key.split("\\."); + String value = parts[1]; + + // We don't need to explicitly define a console appender because the + // "rolling" appender will become a console appender. We also don't + // carry over "rolling_old" + if (keyParts[1].equals("console") || keyParts[1].equals("rolling_old")) { + continue; + } + + switch (keyParts[2]) { + case "type": + if (value.equals("RollingFile")) { + value = "Console"; + } + output.add(key + " = " + value); + break; + + case "fileName": + case "filePattern": + case "policies": + case "strategy": + // No longer applicable. Omit it. + skipNext = line.endsWith("\\"); + break; + + default: + output.add(line); + break; + } + } else if (line.startsWith("rootLogger.appenderRef")) { + String[] parts = line.split("\\s*=\\s*"); + + // The root logger only needs this appender + if (parts[1].equals("rolling")) { + output.add(line); + } + } else { + output.add(line); + } + } + + return output; + } +} diff --git a/distribution/docker/transform-log4j-config/src/test/java/org/elasticsearch/transform/log4j/TransformLog4jConfigTests.java b/distribution/docker/transform-log4j-config/src/test/java/org/elasticsearch/transform/log4j/TransformLog4jConfigTests.java new file mode 100644 index 000000000000..7deb414ba46e --- /dev/null +++ b/distribution/docker/transform-log4j-config/src/test/java/org/elasticsearch/transform/log4j/TransformLog4jConfigTests.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transform.log4j; + +import junit.framework.TestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +public class TransformLog4jConfigTests extends TestCase { + + /** + * Check that the transformer doesn't explode when given an empty file. + */ + public void testTransformEmptyConfig() { + runTest(List.of(), List.of()); + } + + /** + * Check that the transformer leaves non-appender lines alone. 
+ */ + public void testTransformEchoesNonAppenderLines() { + List input = List.of( + "status = error", + "", + "##############################", + "rootLogger.level = info", + "example = \"broken\\", + " line\"" + ); + + runTest(input, input); + } + + /** + * Check that the root logger appenders are filtered to just the "rolling" appender + */ + public void testTransformFiltersRootLogger() { + List input = List.of( + "rootLogger.appenderRef.console.ref = console", + "rootLogger.appenderRef.rolling.ref = rolling", + "rootLogger.appenderRef.rolling_old.ref = rolling_old" + ); + List expected = List.of("rootLogger.appenderRef.rolling.ref = rolling"); + + runTest(input, expected); + } + + /** + * Check that any explicit 'console' or 'rolling_old' appenders are removed. + */ + public void testTransformRemoveExplicitConsoleAndRollingOldAppenders() { + List input = List.of( + "appender.console.type = Console", + "appender.console.name = console", + "appender.console.layout.type = PatternLayout", + "appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n", + "appender.rolling_old.type = RollingFile", + "appender.rolling_old.name = rolling_old", + "appender.rolling_old.layout.type = PatternLayout", + "appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n" + ); + + runTest(input, List.of()); + } + + /** + * Check that rolling file appenders are converted to console appenders. + */ + public void testTransformConvertsRollingToConsole() { + List input = List.of("appender.rolling.type = RollingFile", "appender.rolling.name = rolling"); + + List expected = List.of("appender.rolling.type = Console", "appender.rolling.name = rolling"); + + runTest(input, expected); + } + + /** + * Check that rolling file appenders have redundant properties removed. + */ + public void testTransformRemovedRedundantProperties() { + List input = List.of( + "appender.rolling.fileName = ${sys:es.logs.base_path}/${sys:es.logs.cluster_name}_server.json", + "appender.rolling.layout.type = ECSJsonLayout", + "appender.rolling.layout.type_name = server", + "appender.rolling.filePattern = ${sys:es.logs.base_path}/${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz", + "appender.rolling.policies.type = Policies", + "appender.rolling.strategy.type = DefaultRolloverStrategy" + ); + + List expected = List.of("appender.rolling.layout.type = ECSJsonLayout", "appender.rolling.layout.type_name = server"); + + runTest(input, expected); + } + + /** + * Check that rolling file appenders have redundant properties removed. 
+ */ + public void testTransformSkipsPropertiesWithLineBreaks() { + List input = List.of( + "appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}\\", + " ${sys:es.logs.cluster_name}_server.json", + "appender.rolling.layout.type = ECSJsonLayout" + ); + + List expected = List.of("appender.rolling.layout.type = ECSJsonLayout"); + + runTest(input, expected); + } + + private void runTest(List input, List expected) { + final List transformed = TransformLog4jConfig.transformConfig(input); + + assertThat(transformed, equalTo(expected)); + } +} diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index b3c94cb8c8bf..d5158d330783 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -121,7 +121,9 @@ curl -X GET "localhost:9200/_cat/nodes?v&pretty" // NOTCONSOLE Log messages go to the console and are handled by the configured Docker logging driver. -By default you can access logs with `docker logs`. +By default you can access logs with `docker logs`. If you would prefer the {es} +container to write logs to disk, set the `ES_LOG_STYLE` environment variable to `file`. +This causes {es} to use the same logging configuration as other {es} distribution formats. To stop the cluster, run `docker-compose down`. The data in the Docker volumes is preserved and loaded diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 8ca2e6cbecec..ea728d57c6a1 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -72,6 +72,7 @@ import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.matchesPattern; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; @@ -647,6 +648,49 @@ public void test120DockerLogsIncludeElasticsearchLogs() throws Exception { assertThat("Container logs don't contain INFO level messages", containerLogs.stdout, containsString("INFO")); } + /** + * Check that it is possible to write logs to disk + */ + public void test121CanUseStackLoggingConfig() throws Exception { + runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "file"))); + + waitForElasticsearch(installation); + + final Result containerLogs = getContainerLogs(); + final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); + + assertThat( + "Container logs should be formatted using the stack config", + stdout.get(stdout.size() - 1), + matchesPattern("^\\[\\d\\d\\d\\d-.*") + ); + assertThat("[logs/docker-cluster.log] should exist but it doesn't", existsInContainer("logs/docker-cluster.log"), is(true)); + } + + /** + * Check that the default logging config can be explicitly selected. 
+ */ + public void test122CanUseDockerLoggingConfig() throws Exception { + runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "console"))); + + waitForElasticsearch(installation); + + final Result containerLogs = getContainerLogs(); + final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); + + assertThat("Container logs should be formatted using the docker config", stdout.get(stdout.size() - 1), startsWith("{\"")); + assertThat("[logs/docker-cluster.log] shouldn't exist but it does", existsInContainer("logs/docker-cluster.log"), is(false)); + } + + /** + * Check that an unknown logging config is rejected + */ + public void test123CannotUseUnknownLoggingConfig() { + final Result result = runContainerExpectingFailure(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "unknown"))); + + assertThat(result.stderr, containsString("ERROR: ES_LOG_STYLE set to [unknown]. Expected [console] or [file]")); + } + /** * Check that the Java process running inside the container has the expected UID, GID and username. */ diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java index 96834fa0c668..5e9713250c1f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java @@ -202,7 +202,7 @@ private static void waitForElasticsearchToExit() { if (isElasticsearchRunning) { final Shell.Result dockerLogs = getContainerLogs(); - fail("Elasticsearch container did exit.\n\nStdout:\n" + dockerLogs.stdout + "\n\nStderr:\n" + dockerLogs.stderr); + fail("Elasticsearch container didn't exit.\n\nStdout:\n" + dockerLogs.stdout + "\n\nStderr:\n" + dockerLogs.stderr); } } diff --git a/settings.gradle b/settings.gradle index 8d8d8f0f4aa4..eba6a72456ed 100644 --- a/settings.gradle +++ b/settings.gradle @@ -42,6 +42,7 @@ List projects = [ 'distribution:docker:oss-docker-aarch64-export', 'distribution:docker:oss-docker-build-context', 'distribution:docker:oss-docker-export', + 'distribution:docker:transform-log4j-config', 'distribution:docker:ubi-docker-aarch64-export', 'distribution:docker:ubi-docker-build-context', 'distribution:docker:ubi-docker-export',