From fd34fbc8c454ddf1558a05acc932e44601ef9f87 Mon Sep 17 00:00:00 2001
From: Lee Dongjin
Date: Thu, 5 Mar 2020 15:37:55 +0900
Subject: [PATCH 1/9] Change log4j dependency to log4j2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1. Update Dependency Configuration (except log4j-appender, shell, and tools)
   1. Change log4j to log4j2 (1.2.17 → 2.14.1)
   2. Change the log4j dependency to log4j-api + log4j-core
   3. Change the slf4j-log4j12 dependency to log4j-slf4j-impl
   4. Add the log4j-1.2-api dependency for backward compatibility
2. Update core
   1. Update Log4jController to use log4j2. (PlaintextAdminIntegrationTest is also changed for the API change.)
   2. Test logging config changed: core/src/test/resources/log4j.properties → core/src/test/resources/log4j2.properties
   3. Logging test classes changed: LogCaptureAppender → LogCaptureContext, LoggingUtil
   4. Enable ignored tests in PlaintextAdminIntegrationTest
3. Update clients
   1. Test logging config changed: clients/src/test/resources/log4j.properties → clients/src/test/resources/log4j2.properties
4. Update connect:mirror
   1. Test logging config changed: connect/mirror/src/test/resources/log4j.properties → connect/mirror/src/test/resources/log4j2.properties
5. Update connect:runtime
   1. Update LoggingResource to use log4j2.
   2. Test logging config changed: connect/runtime/src/test/resources/log4j.properties → connect/runtime/src/test/resources/log4j2.properties
6. Update streams
   1. Logging test class changed: LogCaptureAppender → LogCaptureContext
   2. Test logging config changed: streams/src/test/resources/log4j.properties → streams/src/test/resources/log4j2.properties
   3. Add archetype log4j2 configuration: streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.properties
7. Update streams:test-utils
   1. Test logging config changed: streams/test-utils/src/test/resources/log4j.properties → streams/test-utils/src/test/resources/log4j2.properties
8. Update log4j-appender
   1. Add a resource-closing step to the KafkaLog4jAppenderTest methods that use real Kafka Producer objects. (This prevents the tests from hanging.)
9. Update raft
   1. Add a log4j deprecation message: raft/bin/test-kraft-server-start.sh
   2. Add a log4j2 configuration: raft/config/kraft-log4j2.properties
10. Update tests
   - Update tests/kafkatest/services/kafka/kafka.py
     - tests/kafkatest/services/kafka/templates/log4j.properties → log4j2.properties
   - Update tests/kafkatest/services/connect.py
     - tests/kafkatest/services/templates/connect_log4j.properties → connect_log4j2.properties
   - Update tests/kafkatest/services/trogdor/trogdor.py
     - tests/kafkatest/services/trogdor/templates/log4j.properties → log4j2.properties
   - Update tests/kafkatest/services/streams.py
     - Add tests/kafkatest/services/templates/tools_log4j2.properties
   - Update tests/kafkatest/tests/streams/streams_relational_smoke_test.py
     - tests/kafkatest/tests/streams/templates/log4j_template.properties → log4j2_template.properties
11. Add log4j deprecation messages to the launcher scripts (except tools)
   - bin/connect-distributed.sh
   - bin/connect-mirror-maker.sh
   - bin/connect-standalone.sh
   - bin/kafka-server-start.sh
   - bin/zookeeper-server-start.sh
   - bin/windows/connect-distributed.bat
   - bin/windows/connect-standalone.bat
   - bin/windows/kafka-server-start.bat
   - bin/windows/zookeeper-server-start.bat
12. Add log4j2 configuration properties for the broker and Connect launcher scripts
   - config/log4j2.properties
   - config/connect-log4j2.properties
13.
Trivial Cleanups - Remove redundant method call: Defaults.CompressionType.toString → Defaults.CompressionType - Fix typo: the curent root logger level → the current root logger level --- README.md | 2 +- bin/connect-distributed.sh | 1 + bin/connect-mirror-maker.sh | 1 + bin/connect-standalone.sh | 1 + bin/kafka-server-start.sh | 1 + bin/windows/connect-distributed.bat | 1 + bin/windows/connect-standalone.bat | 1 + bin/windows/kafka-server-start.bat | 1 + bin/windows/zookeeper-server-start.bat | 1 + bin/zookeeper-server-start.sh | 1 + build.gradle | 95 +++--- checkstyle/import-control.xml | 3 +- .../src/test/resources/log4j2.properties | 17 +- config/connect-log4j2.properties | 60 ++++ config/log4j2.properties | 171 ++++++++++ .../{log4j.properties => log4j2.properties} | 31 +- .../rest/resources/LoggingResource.java | 118 +++---- .../rest/resources/LoggingResourceTest.java | 151 +++++---- .../{log4j.properties => log4j2.properties} | 37 ++- .../scala/kafka/utils/Log4jController.scala | 116 ++++--- core/src/test/resources/log4j2.properties | 37 +++ .../api/PlaintextAdminIntegrationTest.scala | 99 +++--- .../unit/kafka/admin/AclCommandTest.scala | 24 +- .../controller/ControllerFailoverTest.scala | 2 - .../ControllerIntegrationTest.scala | 34 +- .../UncleanLeaderElectionTest.scala | 14 +- .../unit/kafka/network/SocketServerTest.scala | 11 +- .../unit/kafka/utils/LogCaptureAppender.scala | 66 ---- .../unit/kafka/utils/LogCaptureContext.scala | 77 +++++ .../scala/unit/kafka/utils/LoggingUtil.scala | 31 ++ gradle/dependencies.gradle | 6 + .../log4jappender/KafkaLog4jAppenderTest.java | 10 +- .../src/test/resources/log4j2.properties | 28 +- raft/bin/test-kraft-server-start.sh | 1 + .../config/kraft-log4j2.properties | 20 +- .../{log4j.properties => log4j2.properties} | 23 +- .../src/main/resources/log4j2.properties | 14 +- .../kafka/streams/KafkaStreamsTest.java | 14 +- .../kafka/streams/StreamsConfigTest.java | 83 +++-- .../AdjustStreamThreadCountTest.java | 161 ++++++---- .../internals/KGroupedStreamImplTest.java | 18 +- .../internals/KStreamKStreamJoinTest.java | 22 +- .../internals/KStreamKTableJoinTest.java | 26 +- ...amSessionWindowAggregateProcessorTest.java | 35 +- .../KStreamSlidingWindowAggregateTest.java | 41 ++- .../internals/KStreamWindowAggregateTest.java | 45 +-- .../internals/KTableKTableInnerJoinTest.java | 9 +- .../internals/KTableKTableLeftJoinTest.java | 9 +- .../internals/KTableKTableOuterJoinTest.java | 9 +- .../internals/KTableKTableRightJoinTest.java | 15 +- .../kstream/internals/KTableSourceTest.java | 29 +- .../internals/GlobalStateManagerImplTest.java | 11 +- .../internals/InternalTopicManagerTest.java | 45 +-- .../internals/PartitionGroupTest.java | 121 +++---- .../internals/ProcessorStateManagerTest.java | 37 +-- .../internals/RecordCollectorTest.java | 67 ++-- .../internals/StateDirectoryTest.java | 64 ++-- .../internals/StateManagerUtilTest.java | 3 +- .../internals/StoreChangelogReaderTest.java | 20 +- .../processor/internals/StreamThreadTest.java | 36 ++- .../processor/internals/TaskManagerTest.java | 42 +-- .../testutil/LogCaptureAppender.java | 130 -------- ...lSchemaRocksDBSegmentedBytesStoreTest.java | 14 +- .../internals/AbstractKeyValueStoreTest.java | 66 ++-- ...bstractRocksDBSegmentedBytesStoreTest.java | 9 +- .../AbstractSessionBytesStoreTest.java | 32 +- .../AbstractWindowBytesStoreTest.java | 20 +- .../CachingInMemorySessionStoreTest.java | 39 ++- .../CachingPersistentSessionStoreTest.java | 31 +- .../CachingPersistentWindowStoreTest.java | 46 +-- 
...OptionsColumnFamilyOptionsAdapterTest.java | 27 +- .../internals/RocksDBSessionStoreTest.java | 2 +- .../RocksDBTimestampedStoreTest.java | 298 +++++++++--------- ...deredCachingPersistentWindowStoreTest.java | 20 +- .../apache/kafka/test/LogCaptureContext.java | 111 +++++++ streams/src/test/resources/log4j.properties | 35 -- streams/src/test/resources/log4j2.properties | 62 ++++ .../src/test/resources/log4j.properties | 21 -- .../src/test/resources/log4j2.properties | 26 +- tests/kafkatest/services/connect.py | 10 +- tests/kafkatest/services/kafka/kafka.py | 6 +- .../services/kafka/templates/log4j.properties | 136 -------- .../kafka/templates/log4j2.properties | 297 +++++++++++++++++ tests/kafkatest/services/streams.py | 4 +- .../templates/connect_log4j2.properties | 39 +++ .../templates/tools_log4j2.properties} | 29 +- .../trogdor/templates/log4j.properties | 23 -- .../trogdor/templates/log4j2.properties | 39 +++ tests/kafkatest/services/trogdor/trogdor.py | 24 +- .../streams/streams_relational_smoke_test.py | 6 +- .../templates/log4j2_template.properties | 40 +++ 91 files changed, 2375 insertions(+), 1536 deletions(-) rename raft/src/test/resources/log4j.properties => clients/src/test/resources/log4j2.properties (71%) create mode 100644 config/connect-log4j2.properties create mode 100644 config/log4j2.properties rename connect/mirror/src/test/resources/{log4j.properties => log4j2.properties} (63%) rename connect/runtime/src/test/resources/{log4j.properties => log4j2.properties} (57%) create mode 100644 core/src/test/resources/log4j2.properties delete mode 100644 core/src/test/scala/unit/kafka/utils/LogCaptureAppender.scala create mode 100644 core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala create mode 100644 core/src/test/scala/unit/kafka/utils/LoggingUtil.scala rename tests/kafkatest/services/templates/connect_log4j.properties => metadata/src/test/resources/log4j2.properties (60%) rename core/src/test/resources/log4j.properties => raft/config/kraft-log4j2.properties (66%) rename storage/src/test/resources/{log4j.properties => log4j2.properties} (52%) rename clients/src/test/resources/log4j.properties => streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.properties (72%) delete mode 100644 streams/src/test/java/org/apache/kafka/streams/processor/internals/testutil/LogCaptureAppender.java create mode 100644 streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java delete mode 100644 streams/src/test/resources/log4j.properties create mode 100644 streams/src/test/resources/log4j2.properties delete mode 100644 streams/test-utils/src/test/resources/log4j.properties rename metadata/src/test/resources/log4j.properties => streams/test-utils/src/test/resources/log4j2.properties (57%) delete mode 100644 tests/kafkatest/services/kafka/templates/log4j.properties create mode 100644 tests/kafkatest/services/kafka/templates/log4j2.properties create mode 100644 tests/kafkatest/services/templates/connect_log4j2.properties rename tests/kafkatest/{tests/streams/templates/log4j_template.properties => services/templates/tools_log4j2.properties} (64%) delete mode 100644 tests/kafkatest/services/trogdor/templates/log4j.properties create mode 100644 tests/kafkatest/services/trogdor/templates/log4j2.properties create mode 100644 tests/kafkatest/tests/streams/templates/log4j2_template.properties diff --git a/README.md b/README.md index 2aa509e081012..ad5d362aee9bb 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ Follow instructions in 
https://kafka.apache.org/quickstart ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testTimeToNextUpdate ### Running a particular unit/integration test with log4j output ### -Change the log4j setting in either `clients/src/test/resources/log4j.properties` or `core/src/test/resources/log4j.properties` +Change the log4j setting in either `clients/src/test/resources/log4j2.properties` or `core/src/test/resources/log4j2.properties` ./gradlew clients:test --tests RequestResponseTest diff --git a/bin/connect-distributed.sh b/bin/connect-distributed.sh index b8088ad923451..f367fe8e4aba7 100755 --- a/bin/connect-distributed.sh +++ b/bin/connect-distributed.sh @@ -23,6 +23,7 @@ fi base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/connect-log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" fi diff --git a/bin/connect-mirror-maker.sh b/bin/connect-mirror-maker.sh index 8e2b2e162daac..8bf70f178bffc 100755 --- a/bin/connect-mirror-maker.sh +++ b/bin/connect-mirror-maker.sh @@ -23,6 +23,7 @@ fi base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/connect-log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" fi diff --git a/bin/connect-standalone.sh b/bin/connect-standalone.sh index 441069fed3139..12d023b312ba9 100755 --- a/bin/connect-standalone.sh +++ b/bin/connect-standalone.sh @@ -23,6 +23,7 @@ fi base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/connect-log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" fi diff --git a/bin/kafka-server-start.sh b/bin/kafka-server-start.sh index 5a53126172de9..831d07b115bdb 100755 --- a/bin/kafka-server-start.sh +++ b/bin/kafka-server-start.sh @@ -22,6 +22,7 @@ fi base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" fi diff --git a/bin/windows/connect-distributed.bat b/bin/windows/connect-distributed.bat index 0535085bde507..a2358e1fba38b 100644 --- a/bin/windows/connect-distributed.bat +++ b/bin/windows/connect-distributed.bat @@ -27,6 +27,7 @@ popd rem Log4j settings IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( + echo DEPRECATED: using log4j 1.x configuration. 
To use log4j 2.x configuration, run with: 'set KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile=file:%BASE_DIR%/config/connect-log4j2.properties' set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties ) diff --git a/bin/windows/connect-standalone.bat b/bin/windows/connect-standalone.bat index 12ebb21dc9a85..3efa311ba4931 100644 --- a/bin/windows/connect-standalone.bat +++ b/bin/windows/connect-standalone.bat @@ -27,6 +27,7 @@ popd rem Log4j settings IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( + echo DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'set KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile=file:%BASE_DIR%/config/connect-log4j2.properties' set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties ) diff --git a/bin/windows/kafka-server-start.bat b/bin/windows/kafka-server-start.bat index 8624eda9ff089..b39c840056abd 100644 --- a/bin/windows/kafka-server-start.bat +++ b/bin/windows/kafka-server-start.bat @@ -21,6 +21,7 @@ IF [%1] EQU [] ( SetLocal IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( + echo DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'set KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile=file:%~dp0../../config/log4j2.properties' set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties ) IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( diff --git a/bin/windows/zookeeper-server-start.bat b/bin/windows/zookeeper-server-start.bat index f201a585135d2..ecdf5fe3710b5 100644 --- a/bin/windows/zookeeper-server-start.bat +++ b/bin/windows/zookeeper-server-start.bat @@ -21,6 +21,7 @@ IF [%1] EQU [] ( SetLocal IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( + echo DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'set KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile=file:%~dp0../../config/log4j2.properties' set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties ) IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( diff --git a/bin/zookeeper-server-start.sh b/bin/zookeeper-server-start.sh index bd9c1142817c0..3dfbb491dea79 100755 --- a/bin/zookeeper-server-start.sh +++ b/bin/zookeeper-server-start.sh @@ -22,6 +22,7 @@ fi base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. 
To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" fi diff --git a/build.gradle b/build.gradle index 954e920d7f06e..a525bfd01e5a1 100644 --- a/build.gradle +++ b/build.gradle @@ -867,7 +867,9 @@ project(':core') { // ZooKeeperMain depends on commons-cli but declares the dependency as `provided` implementation libs.commonsCli - compileOnly libs.log4j + compileOnly libs.log4j2Api + compileOnly libs.log4j1_2Api + compileOnly libs.log4j2Core testImplementation project(':clients').sourceSets.test.output testImplementation project(':metadata').sourceSets.test.output @@ -889,7 +891,10 @@ project(':core') { testImplementation libs.apachedsMavibotPartition testImplementation libs.apachedsJdbmPartition testImplementation libs.junitJupiter - testImplementation libs.slf4jlog4j + testImplementation libs.log4j2Api + testImplementation libs.log4j2Core + testImplementation libs.log4j2CoreTest + testImplementation libs.slf4jlog4j2 testImplementation(libs.jfreechart) { exclude group: 'junit', module: 'junit' } @@ -921,8 +926,8 @@ project(':core') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -1146,12 +1151,14 @@ project(':metadata') { implementation libs.jacksonDatabind implementation libs.jacksonJDK8Datatypes implementation libs.metrics - compileOnly libs.log4j + compileOnly libs.log4j2Api + compileOnly libs.log4j1_2Api + compileOnly libs.log4j2Core testImplementation libs.junitJupiter testImplementation libs.hamcrest testImplementation libs.mockitoCore testImplementation libs.mockitoInline - testImplementation libs.slf4jlog4j + testImplementation libs.slf4jlog4j2 testImplementation project(':clients').sourceSets.test.output testImplementation project(':raft').sourceSets.test.output generator project(':generator') @@ -1245,7 +1252,7 @@ project(':clients') { testImplementation libs.junitJupiter testImplementation libs.mockitoCore - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testRuntimeOnly libs.jacksonDatabind testRuntimeOnly libs.jacksonJDK8Datatypes testImplementation libs.jose4j @@ -1379,7 +1386,7 @@ project(':raft') { testImplementation libs.mockitoCore testImplementation libs.jqwik - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 generator project(':generator') } @@ -1750,8 +1757,8 @@ project(':trogdor') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -1787,7 +1794,7 @@ project(':shell') { testImplementation project(':clients') testImplementation libs.junitJupiter - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 } javadoc { @@ -1832,7 +1839,9 @@ project(':streams') { testImplementation project(':clients').sourceSets.test.output testImplementation project(':core') testImplementation project(':core').sourceSets.test.output - testImplementation libs.log4j + testImplementation libs.log4j2Api + testImplementation libs.log4j2Core + testImplementation libs.log4j2CoreTest testImplementation 
libs.junitJupiterApi testImplementation libs.junitVintageEngine testImplementation libs.easymock @@ -1843,7 +1852,7 @@ project(':streams') { testImplementation libs.mockitoInline // supports mocking static methods, final classes, etc. testRuntimeOnly project(':streams:test-utils') - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 generator project(':generator') } @@ -1973,7 +1982,7 @@ project(':streams:streams-scala') { testImplementation libs.junitJupiter testImplementation libs.easymock testImplementation libs.hamcrest - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 } javadoc { @@ -2009,7 +2018,7 @@ project(':streams:test-utils') { testImplementation libs.mockitoCore testImplementation libs.hamcrest - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 } javadoc { @@ -2042,7 +2051,7 @@ project(':streams:examples') { implementation project(':streams') - implementation libs.slf4jlog4j + implementation libs.slf4jlog4j2 testImplementation project(':streams:test-utils') testImplementation project(':clients').sourceSets.test.output // for org.apache.kafka.test.IntegrationTest @@ -2314,7 +2323,7 @@ project(':jmh-benchmarks') { implementation libs.jacksonDatabind implementation libs.metrics implementation libs.mockitoCore - implementation libs.slf4jlog4j + implementation libs.slf4jlog4j2 implementation libs.scalaLibrary implementation libs.scalaJava8Compat } @@ -2379,7 +2388,7 @@ project(':connect:api') { implementation libs.jaxrsApi testImplementation libs.junitJupiter - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testImplementation project(':clients').sourceSets.test.output } @@ -2389,8 +2398,8 @@ project(':connect:api') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2416,7 +2425,7 @@ project(':connect:transforms') { testImplementation libs.easymock testImplementation libs.junitJupiter - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testImplementation project(':clients').sourceSets.test.output } @@ -2426,8 +2435,8 @@ project(':connect:transforms') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2456,7 +2465,7 @@ project(':connect:json') { testImplementation libs.easymock testImplementation libs.junitJupiter - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testImplementation project(':clients').sourceSets.test.output } @@ -2466,8 +2475,8 @@ project(':connect:json') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2496,7 +2505,9 @@ project(':connect:runtime') { implementation project(':tools') implementation libs.slf4jApi - implementation libs.log4j + implementation libs.log4j2Api + implementation libs.log4j1_2Api + implementation libs.log4j2Core implementation libs.jose4j // for SASL/OAUTHBEARER JWT validation implementation libs.jacksonAnnotations implementation 
libs.jacksonJaxrsJsonProvider @@ -2524,7 +2535,7 @@ project(':connect:runtime') { testImplementation libs.mockitoInline testImplementation libs.httpclient - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 } javadoc { @@ -2533,8 +2544,8 @@ project(':connect:runtime') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2602,7 +2613,7 @@ project(':connect:file') { testImplementation libs.junitJupiter testImplementation libs.mockitoCore - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testImplementation project(':clients').sourceSets.test.output } @@ -2612,8 +2623,8 @@ project(':connect:file') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2641,7 +2652,7 @@ project(':connect:basic-auth-extension') { testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testRuntimeOnly libs.jerseyContainerServlet } @@ -2651,8 +2662,8 @@ project(':connect:basic-auth-extension') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2688,7 +2699,7 @@ project(':connect:mirror') { testImplementation project(':core').sourceSets.test.output testRuntimeOnly project(':connect:runtime') - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 testRuntimeOnly libs.bcpkix } @@ -2698,8 +2709,8 @@ project(':connect:mirror') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') @@ -2724,7 +2735,7 @@ project(':connect:mirror-client') { testImplementation libs.junitJupiter testImplementation project(':clients').sourceSets.test.output - testRuntimeOnly libs.slf4jlog4j + testRuntimeOnly libs.slf4jlog4j2 } javadoc { @@ -2733,8 +2744,8 @@ project(':connect:mirror-client') { tasks.create(name: "copyDependantLibs", type: Copy) { from (configurations.testRuntimeClasspath) { - include('slf4j-log4j12*') - include('log4j*jar') + include('log4j-slf4j-impl*') + include('log4j-[api|core]*jar') } from (configurations.runtimeClasspath) { exclude('kafka-clients*') diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 414e59a614d31..d24d9168317c0 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -459,6 +459,7 @@ + @@ -562,7 +563,7 @@ - + diff --git a/raft/src/test/resources/log4j.properties b/clients/src/test/resources/log4j2.properties similarity index 71% rename from raft/src/test/resources/log4j.properties rename to clients/src/test/resources/log4j2.properties index 6d90f6dd34884..a04dd4f1a452a 100644 --- a/raft/src/test/resources/log4j.properties +++ 
b/clients/src/test/resources/log4j2.properties @@ -12,11 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -log4j.rootLogger=OFF, stdout +name=TestConfig +appenders = console -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=[%d] %p %m (%c:%L)%n -log4j.logger.org.apache.kafka.raft=ERROR -log4j.logger.org.apache.kafka.snapshot=ERROR +rootLogger.level = off +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT + +status = error diff --git a/config/connect-log4j2.properties b/config/connect-log4j2.properties new file mode 100644 index 0000000000000..60fc963b2cf7f --- /dev/null +++ b/config/connect-log4j2.properties @@ -0,0 +1,60 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=ConnectConfig +appenders=stdout,connectAppender + +# Send the logs to the console. +# +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout + +# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the +# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed +# and compressed in the same directory but with a filename that ends in the `DatePattern` option. +# +appender.connectAppender.type=RollingFile +appender.connectAppender.name=CONNECT_APPENDER +appender.connectAppender.fileName=${kafka.logs.dir}/connect.log +appender.connectAppender.filePattern=${kafka.logs.dir}/connect.log.%d{yyyy-MM-dd}.log.gz +appender.connectAppender.layout.type=PatternLayout +appender.connectAppender.policies.type=Policies +appender.connectAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.connectAppender.policies.time.interval=1 +appender.connectAppender.policies.time.modulate=true +appender.connectAppender.strategy.type=DefaultRolloverStrategy +appender.connectAppender.strategy.max=1 + +# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information +# in the log messages, where appropriate. This makes it easier to identify those log messages that apply to a +# specific connector. 
+# +connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n + +appender.stdout.layout.pattern=${connect.log.pattern} +appender.connectAppender.layout.pattern=${connect.log.pattern} + +rootLogger.level=INFO +rootLogger.appenderRefs=stdout,connectAppender +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.connectAppender.ref=CONNECT_APPENDER + +loggers=org.apache.zookeeper,org.reflections + +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=ERROR + +logger.org.reflections.name=org.reflections +logger.org.reflections.level=ERROR diff --git a/config/log4j2.properties b/config/log4j2.properties new file mode 100644 index 0000000000000..e699f0a1eb670 --- /dev/null +++ b/config/log4j2.properties @@ -0,0 +1,171 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=KafkaConfig + +# Unspecified loggers and loggers with additivity=true output to server.log and stdout +appenders=stdout,kafkaAppender,requestAppender,controllerAppender,cleanerAppender,stateChangeAppender,authorizerAppender + +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout +appender.stdout.layout.pattern=[%d] %p %m (%c)%n + +appender.kafkaAppender.type=RollingFile +appender.kafkaAppender.name=KAFKA_APPENDER +appender.kafkaAppender.fileName=${kafka.logs.dir}/server.log +appender.kafkaAppender.filePattern=${kafka.logs.dir}/server.log.%d{yyyy-MM-dd}.log.gz +appender.kafkaAppender.layout.type=PatternLayout +appender.kafkaAppender.layout.pattern=[%d] %p %m (%c)%n +appender.kafkaAppender.policies.type=Policies +appender.kafkaAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.kafkaAppender.policies.time.interval=1 +appender.kafkaAppender.policies.time.modulate=true +appender.kafkaAppender.strategy.type=DefaultRolloverStrategy +appender.kafkaAppender.strategy.max=1 + +appender.requestAppender.type=RollingFile +appender.requestAppender.name=REQUEST_APPENDER +appender.requestAppender.fileName=${kafka.logs.dir}/kafka-request.log +appender.requestAppender.filePattern=${kafka.logs.dir}/kafka-request.log.%d{yyyy-MM-dd}.log.gz +appender.requestAppender.layout.type=PatternLayout +appender.requestAppender.layout.pattern=[%d] %p %m (%c)%n +appender.requestAppender.policies.type=Policies +appender.requestAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.requestAppender.policies.time.interval=1 +appender.requestAppender.policies.time.modulate=true +appender.requestAppender.strategy.type=DefaultRolloverStrategy +appender.requestAppender.strategy.max=1 + +appender.controllerAppender.type=RollingFile +appender.controllerAppender.name=CONTROLLER_APPENDER +appender.controllerAppender.fileName=${kafka.logs.dir}/controller.log 
+appender.controllerAppender.filePattern=${kafka.logs.dir}/controller.log.%d{yyyy-MM-dd}.log.gz +appender.controllerAppender.layout.type=PatternLayout +appender.controllerAppender.layout.pattern=[%d] %p %m (%c)%n +appender.controllerAppender.policies.type=Policies +appender.controllerAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.controllerAppender.policies.time.interval=1 +appender.controllerAppender.policies.time.modulate=true +appender.controllerAppender.strategy.type=DefaultRolloverStrategy +appender.controllerAppender.strategy.max=1 + +appender.cleanerAppender.type=RollingFile +appender.cleanerAppender.name=CLEANER_APPENDER +appender.cleanerAppender.fileName=${kafka.logs.dir}/log-cleaner.log +appender.cleanerAppender.filePattern=${kafka.logs.dir}/log-cleaner.log.%d{yyyy-MM-dd}.log.gz +appender.cleanerAppender.layout.type=PatternLayout +appender.cleanerAppender.layout.pattern=[%d] %p %m (%c)%n +appender.cleanerAppender.policies.type=Policies +appender.cleanerAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.cleanerAppender.policies.time.interval=1 +appender.cleanerAppender.policies.time.modulate=true +appender.cleanerAppender.strategy.type=DefaultRolloverStrategy +appender.cleanerAppender.strategy.max=1 + +appender.stateChangeAppender.type=RollingFile +appender.stateChangeAppender.name=STATE_CHANGE_APPENDER +appender.stateChangeAppender.fileName=${kafka.logs.dir}/state-change.log +appender.stateChangeAppender.filePattern=${kafka.logs.dir}/state-change.log.%d{yyyy-MM-dd}.log.gz +appender.stateChangeAppender.layout.type=PatternLayout +appender.stateChangeAppender.layout.pattern=[%d] %p %m (%c)%n +appender.stateChangeAppender.policies.type=Policies +appender.stateChangeAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.stateChangeAppender.policies.time.interval=1 +appender.stateChangeAppender.policies.time.modulate=true +appender.stateChangeAppender.strategy.type=DefaultRolloverStrategy +appender.stateChangeAppender.strategy.max=1 + +appender.authorizerAppender.type=RollingFile +appender.authorizerAppender.name=AUTHORIZER_APPENDER +appender.authorizerAppender.fileName=${kafka.logs.dir}/kafka-authorizer.log +appender.authorizerAppender.filePattern=${kafka.logs.dir}/kafka-authorizer.log.%d{yyyy-MM-dd}.log.gz +appender.authorizerAppender.layout.type=PatternLayout +appender.authorizerAppender.layout.pattern=[%d] %p %m (%c)%n +appender.authorizerAppender.policies.type=Policies +appender.authorizerAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.authorizerAppender.policies.time.interval=1 +appender.authorizerAppender.policies.time.modulate=true +appender.authorizerAppender.strategy.type=DefaultRolloverStrategy +appender.authorizerAppender.strategy.max=1 + +# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise +rootLogger.level=INFO +rootLogger.appenderRefs=stdout,kafkaAppender +rootLogger.appenderRef.stdout.ref=STDOUT +rootLogger.appenderRef.kafkaAppender.ref=KAFKA_APPENDER + +loggers=org.apache.zookeeper,org.apache.kafka,kafka.request.logger,kafka.network.RequestChannel$,kafka.controller,kafka.log.LogCleaner,state.change.logger,kafka.authorizer.logger,kafka + +# Change the log level below to adjust ZK client logging +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=INFO + +# Change the log levels below to adjust the general broker logging level (output to server.log and stdout) +logger.kafka.name=kafka +logger.kafka.level=INFO + +# Change the 
log level below to DEBUG or TRACE to enable request logging +logger.kafka.request.logger.name=kafka.request.logger +logger.kafka.request.logger.level=WARN +logger.kafka.request.logger.additivity=false +logger.kafka.request.logger.appenderRefs=requestAppender +logger.kafka.request.logger.requestAppender.ref=REQUEST_APPENDER + +# Uncomment the lines below, Add 'kafka.network.Processor,kafka.server.KafkaApis' to loggers above, and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output +# related to the handling of requests +# logger.kafka.network.Processor.name=kafka.network.Processor +# logger.kafka.network.Processor.level=TRACE +# logger.kafka.network.Processor.appenderRefs=requestAppender +# logger.kafka.network.Processor.requestAppender.ref=REQUEST_APPENDER + +# logger.kafka.server.KafkaApis.name=kafka.server.KafkaApis +# logger.kafka.server.KafkaApis.level=TRACE +# logger.kafka.server.KafkaApis.additivity=false +# logger.kafka.server.KafkaApis.appenderRefs=requestAppender +# logger.kafka.server.KafkaApis.requestAppender.ref=REQUEST_APPENDER + +logger.kafka.network.RequestChannel$.name=kafka.network.RequestChannel$ +logger.kafka.network.RequestChannel$.level=WARN +logger.kafka.network.RequestChannel$.additivity=false +logger.kafka.network.RequestChannel$.appenderRefs=requestAppender +logger.kafka.network.RequestChannel$.requestAppender.ref=REQUEST_APPENDER + +logger.kafka.controller.name=kafka.controller +logger.kafka.controller.level=TRACE +logger.kafka.controller.additivity=false +logger.kafka.controller.appenderRefs=controllerAppender +logger.kafka.controller.controllerAppender.ref=CONTROLLER_APPENDER + +logger.kafka.log.LogCleaner.name=kafka.log.LogCleaner +logger.kafka.log.LogCleaner.level=INFO +logger.kafka.log.LogCleaner.additivity=false +logger.kafka.log.LogCleaner.appenderRefs=cleanerAppender +logger.kafka.log.LogCleaner.cleanerAppender.ref=CLEANER_APPENDER + +logger.state.change.logger.name=state.change.logger +logger.state.change.logger.level=INFO +logger.state.change.logger.additivity=false +logger.state.change.logger.appenderRefs=stateChangeAppender +logger.state.change.logger.stateChangeAppender.ref=STATE_CHANGE_APPENDER + +# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses +logger.kafka.authorizer.logger.name=kafka.authorizer.logger +logger.kafka.authorizer.logger.level=INFO +logger.kafka.authorizer.logger.additivity=false +logger.kafka.authorizer.logger.appenderRefs=authorizerAppender +logger.kafka.authorizer.logger.authorizerAppender.ref=AUTHORIZER_APPENDER + +logger.org.apache.kafka.name=org.apache.kafka +logger.org.apache.kafka.level=INFO diff --git a/connect/mirror/src/test/resources/log4j.properties b/connect/mirror/src/test/resources/log4j2.properties similarity index 63% rename from connect/mirror/src/test/resources/log4j.properties rename to connect/mirror/src/test/resources/log4j2.properties index a2ac021dfab98..87b898a41b388 100644 --- a/connect/mirror/src/test/resources/log4j.properties +++ b/connect/mirror/src/test/resources/log4j2.properties @@ -14,21 +14,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
## -log4j.rootLogger=ERROR, stdout +name=TestConfig +appenders=console -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout # # The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information # in the log message, where appropriate. This makes it easier to identify those log messages that apply to a # specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. # -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n +appender.console.layout.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n # # The following line includes no MDC context parameters: #log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n (%t) -log4j.logger.org.reflections=OFF -log4j.logger.kafka=OFF -log4j.logger.state.change.logger=OFF -log4j.logger.org.apache.kafka.connect.mirror=INFO +rootLogger.level=ERROR +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +loggers=org.reflections,state.change.logger,org.apache.kafka.connect.mirror,kafka + +logger.org.reflections.name=org.reflections +logger.org.reflections.level=OFF + +logger.state.change.logger.name=state.change.logger +logger.state.change.logger.level=OFF + +logger.org.apache.kafka.connect.mirror.name=org.apache.kafka.connect.mirror +logger.org.apache.kafka.connect.mirror.level=INFO + +logger.kafka.name=kafka +logger.kafka.level=OFF diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java index ce9ce14e97488..6069951765c0e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java @@ -18,9 +18,10 @@ import org.apache.kafka.connect.errors.NotFoundException; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.core.LoggerContext; import javax.ws.rs.Consumes; import javax.ws.rs.GET; @@ -30,14 +31,16 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; + import java.util.ArrayList; import java.util.Collections; -import java.util.Enumeration; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.TreeMap; +import java.util.stream.Collectors; /** * A set of endpoints to adjust the log levels of runtime loggers. @@ -48,7 +51,10 @@ public class LoggingResource { /** - * Log4j uses "root" (case insensitive) as name of the root logger. + * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. + * + * The root logger's name is changed in log4j2 to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]) but for backward- + * compatibility. Kafka keeps its original root logger name. It is why here is a dedicated definition for the root logger name. 
*/ private static final String ROOT_LOGGER_NAME = "root"; @@ -60,67 +66,60 @@ public class LoggingResource { @GET @Path("/") public Response listLoggers() { - Map> loggers = new TreeMap<>(); - Enumeration enumeration = currentLoggers(); - Collections.list(enumeration) - .stream() - .filter(logger -> logger.getLevel() != null) - .forEach(logger -> loggers.put(logger.getName(), levelToMap(logger))); + // current loggers + final Map> loggers = currentLoggers() + .stream() + .filter(logger -> logger.getLevel() != Level.OFF) + .collect(Collectors.toMap(logger -> logger.getName(), logger -> levelToMap(logger))); + // Replace "" logger to "root" logger Logger root = rootLogger(); - if (root.getLevel() != null) { + if (root.getLevel() != Level.OFF) { loggers.put(ROOT_LOGGER_NAME, levelToMap(root)); } - return Response.ok(loggers).build(); + return Response.ok(new TreeMap<>(loggers)).build(); } /** * Get the log level of a named logger. * - * @param namedLogger name of a logger + * @param loggerName name of a logger * @return level of the logger, effective level if the level was not explicitly set. */ @GET @Path("/{logger}") - public Response getLogger(final @PathParam("logger") String namedLogger) { - Objects.requireNonNull(namedLogger, "require non-null name"); + public Response getLogger(final @PathParam("logger") String loggerName) { + Objects.requireNonNull(loggerName, "require non-null name"); - Logger logger = null; - if (ROOT_LOGGER_NAME.equalsIgnoreCase(namedLogger)) { + final Logger logger; + if (ROOT_LOGGER_NAME.equalsIgnoreCase(loggerName)) { logger = rootLogger(); } else { - Enumeration en = currentLoggers(); - // search within existing loggers for the given name. - // using LogManger.getLogger() will create a logger if it doesn't exist - // (potential leak since these don't get cleaned up). - while (en.hasMoreElements()) { - Logger l = en.nextElement(); - if (namedLogger.equals(l.getName())) { - logger = l; - break; - } - } + List en = currentLoggers(); + Optional found = en.stream().filter(existingLogger -> loggerName.equals(existingLogger.getName())).findAny(); + + logger = found.orElse(null); } + if (logger == null) { - throw new NotFoundException("Logger " + namedLogger + " not found."); + throw new NotFoundException("Logger " + loggerName + " not found."); } else { return Response.ok(effectiveLevelToMap(logger)).build(); } } - /** * Adjust level of a named logger. if name corresponds to an ancestor, then the log level is applied to all child loggers. 
* - * @param namedLogger name of the logger - * @param levelMap a map that is expected to contain one key 'level', and a value that is one of the log4j levels: - * DEBUG, ERROR, FATAL, INFO, TRACE, WARN + * @param loggerName name of the logger + * @param levelMap a map that is expected to contain one key 'level', and a value that is one of the log4j levels: + * DEBUG, ERROR, FATAL, INFO, TRACE, WARN, OFF * @return names of loggers whose levels were modified */ @PUT @Path("/{logger}") - public Response setLevel(final @PathParam("logger") String namedLogger, + public Response setLevel(final @PathParam("logger") String loggerName, final Map levelMap) { String desiredLevelStr = levelMap.get("level"); if (desiredLevelStr == null) { @@ -133,20 +132,18 @@ public Response setLevel(final @PathParam("logger") String namedLogger, } List childLoggers; - if (ROOT_LOGGER_NAME.equalsIgnoreCase(namedLogger)) { - childLoggers = Collections.list(currentLoggers()); + if (ROOT_LOGGER_NAME.equalsIgnoreCase(loggerName)) { + childLoggers = new ArrayList<>(currentLoggers()); childLoggers.add(rootLogger()); } else { childLoggers = new ArrayList<>(); - Logger ancestorLogger = lookupLogger(namedLogger); - Enumeration en = currentLoggers(); + Logger ancestorLogger = lookupLogger(loggerName); boolean present = false; - while (en.hasMoreElements()) { - Logger current = en.nextElement(); - if (current.getName().startsWith(namedLogger)) { - childLoggers.add(current); + for (Logger logger : currentLoggers()) { + if (logger.getName().startsWith(loggerName)) { + childLoggers.add(logger); } - if (namedLogger.equals(current.getName())) { + if (loggerName.equals(logger.getName())) { present = true; } } @@ -158,43 +155,50 @@ public Response setLevel(final @PathParam("logger") String namedLogger, List modifiedLoggerNames = new ArrayList<>(); for (Logger logger: childLoggers) { logger.setLevel(level); - modifiedLoggerNames.add(logger.getName()); + if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName())) { + modifiedLoggerNames.add(ROOT_LOGGER_NAME); + } else { + modifiedLoggerNames.add(logger.getName()); + } } Collections.sort(modifiedLoggerNames); return Response.ok(modifiedLoggerNames).build(); } - protected Logger lookupLogger(String namedLogger) { - return LogManager.getLogger(namedLogger); + protected Logger lookupLogger(String loggerName) { + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + + return loggerContext.getLogger(loggerName); } @SuppressWarnings("unchecked") - protected Enumeration currentLoggers() { - return LogManager.getCurrentLoggers(); + protected List currentLoggers() { + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + + return loggerContext.getLoggers() + .stream() + .filter(logger -> !logger.getName().equals(LogManager.ROOT_LOGGER_NAME)) + .collect(Collectors.toList()); } protected Logger rootLogger() { - return LogManager.getRootLogger(); + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + + return loggerContext.getRootLogger(); } /** - * * Map representation of a logger's effective log level. * * @param logger a non-null log4j logger * @return a singleton map whose key is level and the value is the string representation of the logger's effective log level. 
*/ private static Map effectiveLevelToMap(Logger logger) { - Level level = logger.getLevel(); - if (level == null) { - level = logger.getEffectiveLevel(); - } - return Collections.singletonMap("level", String.valueOf(level)); + return Collections.singletonMap("level", logger.getLevel() != null ? logger.getLevel().toString() : null); } /** - * * Map representation of a logger's log level. * * @param logger a non-null log4j logger diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java index 63814cdc1737f..8f65a15954f5c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java @@ -18,20 +18,18 @@ import org.apache.kafka.connect.errors.NotFoundException; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; -import org.apache.log4j.Hierarchy; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.core.LoggerContext; import org.junit.Test; import java.util.Arrays; import java.util.Collections; -import java.util.Enumeration; import java.util.List; import java.util.Map; -import java.util.Vector; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -41,54 +39,67 @@ public class LoggingResourceTest { @Test - public void getLoggersIgnoresNullLevelsTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; - Logger a = new Logger("a") { - }; + public void getLoggersIgnoresOFFLevelsTest() { + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); + root.setLevel(Level.OFF); + final Logger a = loggerContext.getLogger("a"); a.setLevel(null); - Logger b = new Logger("b") { - }; + final Logger b = loggerContext.getLogger("b"); b.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(a, b)); when(loggingResource.rootLogger()).thenReturn(root); when(loggingResource.listLoggers()).thenCallRealMethod(); Map> loggers = (Map>) loggingResource.listLoggers().getEntity(); assertEquals(1, loggers.size()); assertEquals("INFO", loggers.get("b").get("level")); + + // restore + loggerContext.removeObject("b"); + loggerContext.removeObject("a"); + loggerContext.getRootLogger().setLevel(Level.INFO); } @Test public void getLoggerFallsbackToEffectiveLogLevelTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); + final Logger a = loggerContext.getLogger("a"); a.setLevel(null); - Logger b = hierarchy.getLogger("b"); + final Logger b = loggerContext.getLogger("b"); b.setLevel(Level.INFO); + 
+ LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(a, b)); when(loggingResource.rootLogger()).thenReturn(root); when(loggingResource.getLogger(any())).thenCallRealMethod(); Map level = (Map) loggingResource.getLogger("a").getEntity(); assertEquals(1, level.size()); assertEquals("ERROR", level.get("level")); + + // restore + loggerContext.removeObject("b"); + loggerContext.removeObject("a"); + loggerContext.getRootLogger().setLevel(Level.INFO); } @Test public void getUnknownLoggerTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); + final Logger a = loggerContext.getLogger("a"); a.setLevel(null); - Logger b = hierarchy.getLogger("b"); + final Logger b = loggerContext.getLogger("b"); b.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(a, b)); when(loggingResource.rootLogger()).thenReturn(root); when(loggingResource.getLogger(any())).thenCallRealMethod(); @@ -97,20 +108,22 @@ public void getUnknownLoggerTest() { @Test public void setLevelTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger p = hierarchy.getLogger("a.b.c.p"); - Logger x = hierarchy.getLogger("a.b.c.p.X"); - Logger y = hierarchy.getLogger("a.b.c.p.Y"); - Logger z = hierarchy.getLogger("a.b.c.p.Z"); - Logger w = hierarchy.getLogger("a.b.c.s.W"); + final Logger p = loggerContext.getLogger("a.b.c.p"); + p.setLevel(Level.INFO); + final Logger x = loggerContext.getLogger("a.b.c.p.X"); x.setLevel(Level.INFO); + final Logger y = loggerContext.getLogger("a.b.c.p.Y"); y.setLevel(Level.INFO); + final Logger z = loggerContext.getLogger("a.b.c.p.Z"); z.setLevel(Level.INFO); + final Logger w = loggerContext.getLogger("a.b.c.s.W"); w.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(x, y, z, w)); when(loggingResource.lookupLogger("a.b.c.p")).thenReturn(p); when(loggingResource.rootLogger()).thenReturn(root); @@ -122,24 +135,34 @@ public void setLevelTest() { assertEquals(x.getLevel(), Level.DEBUG); assertEquals(y.getLevel(), Level.DEBUG); assertEquals(z.getLevel(), Level.DEBUG); + + // restore + loggerContext.removeObject("a.b.c.s.W"); + loggerContext.removeObject("a.b.c.p.Z"); + loggerContext.removeObject("a.b.c.p.Y"); + loggerContext.removeObject("a.b.c.p.X"); + loggerContext.removeObject("a.b.c.p"); + loggerContext.getRootLogger().setLevel(Level.INFO); } @Test public void setRootLevelTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger p = hierarchy.getLogger("a.b.c.p"); - Logger x = 
hierarchy.getLogger("a.b.c.p.X"); - Logger y = hierarchy.getLogger("a.b.c.p.Y"); - Logger z = hierarchy.getLogger("a.b.c.p.Z"); - Logger w = hierarchy.getLogger("a.b.c.s.W"); + final Logger p = loggerContext.getLogger("a.b.c.p"); + p.setLevel(Level.INFO); + final Logger x = loggerContext.getLogger("a.b.c.p.X"); x.setLevel(Level.INFO); + final Logger y = loggerContext.getLogger("a.b.c.p.Y"); y.setLevel(Level.INFO); + final Logger z = loggerContext.getLogger("a.b.c.p.Z"); z.setLevel(Level.INFO); + final Logger w = loggerContext.getLogger("a.b.c.s.W"); w.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(x, y, z, w)); when(loggingResource.lookupLogger("a.b.c.p")).thenReturn(p); when(loggingResource.rootLogger()).thenReturn(root); @@ -147,25 +170,34 @@ public void setRootLevelTest() { List modified = (List) loggingResource.setLevel("root", Collections.singletonMap("level", "DEBUG")).getEntity(); assertEquals(5, modified.size()); assertEquals(Arrays.asList("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", "root"), modified); - assertNull(p.getLevel()); assertEquals(root.getLevel(), Level.DEBUG); + assertEquals(p.getLevel(), Level.INFO); assertEquals(w.getLevel(), Level.DEBUG); assertEquals(x.getLevel(), Level.DEBUG); assertEquals(y.getLevel(), Level.DEBUG); assertEquals(z.getLevel(), Level.DEBUG); + + // restore + loggerContext.removeObject("a.b.c.s.W"); + loggerContext.removeObject("a.b.c.p.Z"); + loggerContext.removeObject("a.b.c.p.Y"); + loggerContext.removeObject("a.b.c.p.X"); + loggerContext.removeObject("a.b.c.p"); + loggerContext.getRootLogger().setLevel(Level.INFO); } @Test public void setLevelWithEmptyArgTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); + final Logger a = loggerContext.getLogger("a"); a.setLevel(null); - Logger b = hierarchy.getLogger("b"); + final Logger b = loggerContext.getLogger("b"); b.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(a, b)); when(loggingResource.rootLogger()).thenReturn(root); when(loggingResource.setLevel(any(), any())).thenCallRealMethod(); @@ -174,23 +206,24 @@ public void setLevelWithEmptyArgTest() { @Test public void setLevelWithInvalidArgTest() { - LoggingResource loggingResource = mock(LoggingResource.class); - Logger root = new Logger("root") { - }; + // setup + final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); + final Logger root = loggerContext.getRootLogger(); root.setLevel(Level.ERROR); - Hierarchy hierarchy = new Hierarchy(root); - Logger a = hierarchy.getLogger("a"); + final Logger a = loggerContext.getLogger("a"); a.setLevel(null); - Logger b = hierarchy.getLogger("b"); + final Logger b = loggerContext.getLogger("b"); b.setLevel(Level.INFO); + + LoggingResource loggingResource = mock(LoggingResource.class); when(loggingResource.currentLoggers()).thenReturn(loggers(a, b)); when(loggingResource.rootLogger()).thenReturn(root); when(loggingResource.setLevel(any(), any())).thenCallRealMethod(); assertThrows(NotFoundException.class, () -> loggingResource.setLevel("@root", 
Collections.singletonMap("level", "HIGH"))); } - private Enumeration loggers(Logger... loggers) { - return new Vector<>(Arrays.asList(loggers)).elements(); + private List loggers(Logger... loggers) { + return Arrays.asList(loggers); } } diff --git a/connect/runtime/src/test/resources/log4j.properties b/connect/runtime/src/test/resources/log4j2.properties similarity index 57% rename from connect/runtime/src/test/resources/log4j.properties rename to connect/runtime/src/test/resources/log4j2.properties index 176692deb7b2b..69f11bd896525 100644 --- a/connect/runtime/src/test/resources/log4j.properties +++ b/connect/runtime/src/test/resources/log4j2.properties @@ -14,22 +14,39 @@ # See the License for the specific language governing permissions and # limitations under the License. ## -log4j.rootLogger=INFO, stdout +name=TestConfig +appenders=console -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout # # The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information # in the log message, where appropriate. This makes it easier to identify those log messages that apply to a # specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. # -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n +appender.console.layout.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n # # The following line includes no MDC context parameters: -#log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n (%t) +#appender.console.layout.pattern=[%d] %p %m (%c:%L)%n (%t) -log4j.logger.org.reflections=ERROR -log4j.logger.kafka=WARN -log4j.logger.org.apache.kafka.connect=DEBUG -log4j.logger.org.apache.kafka.connect.runtime.distributed=DEBUG -log4j.logger.org.apache.kafka.connect.integration=DEBUG +rootLogger.level=INFO +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +loggers=org.reflections,org.apache.kafka.connect,integration,runtime.distributed,kafka + +logger.org.reflections.name=org.reflections +logger.org.reflections.level=ERROR + +logger.org.apache.kafka.connect.name=org.apache.kafka.connect +logger.org.apache.kafka.connect.level=DEBUG + +logger.integration.name=org.apache.kafka.connect.integration +logger.integration.level=DEBUG + +logger.runtime.distributed.name=org.apache.kafka.connect.runtime.distributed +logger.runtime.distributed.level=DEBUG + +logger.kafka.name=kafka +logger.kafka.level=WARN diff --git a/core/src/main/scala/kafka/utils/Log4jController.scala b/core/src/main/scala/kafka/utils/Log4jController.scala index 0d54c74e07542..af18a67f5f9a1 100755 --- a/core/src/main/scala/kafka/utils/Log4jController.scala +++ b/core/src/main/scala/kafka/utils/Log4jController.scala @@ -17,83 +17,89 @@ package kafka.utils +import org.apache.logging.log4j.core.LoggerContext +import org.apache.logging.log4j.core.config.Configurator +import org.apache.logging.log4j.{Level, LogManager} + import java.util import java.util.Locale - -import org.apache.kafka.common.utils.Utils -import org.apache.log4j.{Level, LogManager, Logger} - -import scala.collection.mutable import scala.jdk.CollectionConverters._ object Log4jController { + + /** + * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. 
+ * + * The root logger's name is changed in log4j2 to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]) but for backward- + * compatibility. Kafka keeps its original root logger name. It is why here is a dedicated definition for the root logger name. + */ val ROOT_LOGGER = "root" - private def resolveLevel(logger: Logger): String = { - var name = logger.getName - var level = logger.getLevel - while (level == null) { - val index = name.lastIndexOf(".") - if (index > 0) { - name = name.substring(0, index) - val ancestor = existingLogger(name) - if (ancestor != null) { - level = ancestor.getLevel - } - } else { - level = existingLogger(ROOT_LOGGER).getLevel - } - } - level.toString - } + /** + * Returns given logger's parent's (or the first ancestor's) name. + * + * @throws IllegalArgumentException loggerName is null or empty. + */ /** * Returns a map of the log4j loggers and their assigned log level. - * If a logger does not have a log level assigned, we return the root logger's log level + * If a logger does not have a log level assigned, we return the log level of the first ancestor with a level configured. */ - def loggers: mutable.Map[String, String] = { - val logs = new mutable.HashMap[String, String]() - val rootLoggerLvl = existingLogger(ROOT_LOGGER).getLevel.toString - logs.put(ROOT_LOGGER, rootLoggerLvl) - - val loggers = LogManager.getCurrentLoggers - while (loggers.hasMoreElements) { - val logger = loggers.nextElement().asInstanceOf[Logger] - if (logger != null) { - logs.put(logger.getName, resolveLevel(logger)) - } - } - logs + def loggers: Map[String, String] = { + val logContext = LogManager.getContext(false).asInstanceOf[LoggerContext] + val rootLoggerLevel = logContext.getRootLogger.getLevel.toString + + // Loggers defined in the configuration + val configured = logContext.getConfiguration.getLoggers.asScala + .values + .filter(_.getName != LogManager.ROOT_LOGGER_NAME) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + // Loggers actually running + val actual = logContext.getLoggers.asScala + .filter(_.getName != LogManager.ROOT_LOGGER_NAME) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + (configured ++ actual) + (ROOT_LOGGER -> rootLoggerLevel) } /** * Sets the log level of a particular logger */ def logLevel(loggerName: String, logLevel: String): Boolean = { - val log = existingLogger(loggerName) - if (!Utils.isBlank(loggerName) && !Utils.isBlank(logLevel) && log != null) { - log.setLevel(Level.toLevel(logLevel.toUpperCase(Locale.ROOT))) + val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT), null) + + if (loggerName == ROOT_LOGGER) { + Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, level) true + } else { + if (loggerExists(loggerName) && level != null) { + Configurator.setAllLevels(loggerName, level) + true + } + else false } - else false } def unsetLogLevel(loggerName: String): Boolean = { - val log = existingLogger(loggerName) - if (!Utils.isBlank(loggerName) && log != null) { - log.setLevel(null) + if (loggerName == ROOT_LOGGER) { + Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, null) true + } else { + if (loggerExists(loggerName)) { + Configurator.setAllLevels(loggerName, null) + true + } + else false } - else false } - def loggerExists(loggerName: String): Boolean = existingLogger(loggerName) != null - - private def existingLogger(loggerName: String) = - if (loggerName == ROOT_LOGGER) - LogManager.getRootLogger - else LogManager.exists(loggerName) + def loggerExists(loggerName: String): 
Boolean = loggers.contains(loggerName) } /** @@ -113,15 +119,7 @@ class Log4jController extends Log4jControllerMBean { def getLogLevel(loggerName: String): String = { - val log = Log4jController.existingLogger(loggerName) - if (log != null) { - val level = log.getLevel - if (level != null) - log.getLevel.toString - else - Log4jController.resolveLevel(log) - } - else "No such logger." + Log4jController.loggers.getOrElse(loggerName, "No such logger.") } def setLogLevel(loggerName: String, level: String): Boolean = Log4jController.logLevel(loggerName, level) diff --git a/core/src/test/resources/log4j2.properties b/core/src/test/resources/log4j2.properties new file mode 100644 index 0000000000000..5e16ffcf738be --- /dev/null +++ b/core/src/test/resources/log4j2.properties @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=TestConfig +appenders=stdout + +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout +appender.stdout.layout.pattern=[%d] %p %m (%c:%L)%n + +rootLogger.level=off +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +loggers=kafka,org.apache.kafka,org.apache.zookeeper + +logger.kafka.name=kafka +logger.kafka.level=WARN + +logger.org.apache.kafka.name=org.apache.kafka +logger.org.apache.kafka.level=WARN + +# zkclient can be verbose, during debugging it is common to adjust it separately +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=WARN diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index b095a6170c38e..0e1e890859c24 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -45,8 +45,9 @@ import org.apache.kafka.common.requests.{DeleteRecordsRequest, MetadataResponse} import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{ConsumerGroupState, ElectionType, TopicCollection, TopicPartition, TopicPartitionInfo, TopicPartitionReplica, Uuid} +import org.apache.logging.log4j.core.config.Configurator import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Disabled, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource import org.slf4j.LoggerFactory @@ -77,16 +78,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { 
super.setUp(testInfo) + Configurator.reconfigure() brokerLoggerConfigResource = new ConfigResource( ConfigResource.Type.BROKER_LOGGER, brokers.head.config.brokerId.toString) } - @AfterEach - override def tearDown(): Unit = { - teardownBrokerLoggers() - super.tearDown() - } - @Test def testClose(): Unit = { val client = Admin.create(createConfig) @@ -1919,7 +1915,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(2, configs.size) assertEquals(Defaults.LogCleanerMinCleanRatio.toString, configs.get(topic1Resource).get(LogConfig.MinCleanableDirtyRatioProp).value) - assertEquals(Defaults.CompressionType.toString, configs.get(topic1Resource).get(LogConfig.CompressionTypeProp).value) + assertEquals(Defaults.CompressionType, configs.get(topic1Resource).get(LogConfig.CompressionTypeProp).value) assertEquals("0.9", configs.get(topic2Resource).get(LogConfig.MinCleanableDirtyRatioProp).value) //check invalid use of append/subtract operation types @@ -2060,9 +2056,23 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = Admin.create(createConfig) LoggerFactory.getLogger("kafka.cluster.Replica").trace("Message to create the logger") val loggerConfig = describeBrokerLoggers() + + // Logger name can't be empty. + assertNull(loggerConfig.get("")) + + // "root" -> "OFF" + val rootLogLevel = loggerConfig.get(Log4jController.ROOT_LOGGER).value + assertEquals("OFF", rootLogLevel) + + // Configured loggers: "kafka" -> "WARN", "org.apache.kafka" -> "WARN", "org.apache.zookeeper" -> "WARN" + assertEquals("WARN", loggerConfig.get("kafka").value) + assertEquals("WARN", loggerConfig.get("org.apache.kafka").value) + assertEquals("WARN", loggerConfig.get("org.apache.zookeeper").value) + + // we expect the log level to be inherited from the first ancestor with a level configured. + // For example, `kafka.cluster.Replica` from `kafka` (ERROR). 
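These assertions lean on two log4j2 behaviours that differ from log4j 1.x: a logger without a LoggerConfig of its own reports the level of its closest configured ancestor (so `kafka.cluster.Replica` resolves to the level of `kafka`), and the root logger is named "" (`LogManager.ROOT_LOGGER_NAME`) rather than "root", which is why the patched Log4jController keeps a dedicated `ROOT_LOGGER = "root"` alias. A minimal sketch of that resolution, assuming a configuration that assigns `kafka` an explicit level (as the test log4j2.properties in this patch does); illustration only, not part of the patch:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EffectiveLevelSketch {
        public static void main(String[] args) {
            // Same context lookup the patched Log4jController performs.
            LoggerContext ctx = (LoggerContext) LogManager.getContext(false);

            // "kafka.cluster.Replica" has no LoggerConfig of its own, so its level
            // comes from the closest configured ancestor, "kafka".
            Level effective = ctx.getLogger("kafka.cluster.Replica").getLevel();
            System.out.println(effective); // prints whatever level "kafka" currently has

            // log4j2 names the root logger "" (LogManager.ROOT_LOGGER_NAME); the
            // legacy "root" name is translated to it before calls like this one.
            Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, Level.DEBUG);
        }
    }

This is also why the describe/alter assertions below compare against the level of `kafka` rather than against the old root-logger level.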
val kafkaLogLevel = loggerConfig.get("kafka").value() val logCleanerLogLevelConfig = loggerConfig.get("kafka.cluster.Replica") - // we expect the log level to be inherited from the first ancestor with a level configured assertEquals(kafkaLogLevel, logCleanerLogLevelConfig.value()) assertEquals("kafka.cluster.Replica", logCleanerLogLevelConfig.name()) assertEquals(ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG, logCleanerLogLevelConfig.source()) @@ -2072,33 +2082,32 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } @Test - @Disabled // To be re-enabled once KAFKA-8779 is resolved def testIncrementalAlterConfigsForLog4jLogLevels(): Unit = { client = Admin.create(createConfig) val initialLoggerConfig = describeBrokerLoggers() - val initialRootLogLevel = initialLoggerConfig.get(Log4jController.ROOT_LOGGER).value() - assertEquals(initialRootLogLevel, initialLoggerConfig.get("kafka.controller.KafkaController").value()) - assertEquals(initialRootLogLevel, initialLoggerConfig.get("kafka.log.LogCleaner").value()) - assertEquals(initialRootLogLevel, initialLoggerConfig.get("kafka.server.ReplicaManager").value()) + val initialKafkaLogLevel = initialLoggerConfig.get("kafka").value() + assertEquals(initialKafkaLogLevel, initialLoggerConfig.get("kafka.controller.KafkaController").value()) + assertEquals(initialKafkaLogLevel, initialLoggerConfig.get("kafka.log.LogCleaner").value()) + assertEquals(initialKafkaLogLevel, initialLoggerConfig.get("kafka.server.ReplicaManager").value()) val newRootLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL val alterRootLoggerEntry = Seq( - new AlterConfigOp(new ConfigEntry(Log4jController.ROOT_LOGGER, newRootLogLevel), AlterConfigOp.OpType.SET) + new AlterConfigOp(new ConfigEntry("kafka", newRootLogLevel), AlterConfigOp.OpType.SET) ).asJavaCollection // Test validateOnly does not change anything alterBrokerLoggers(alterRootLoggerEntry, validateOnly = true) val validatedLoggerConfig = describeBrokerLoggers() - assertEquals(initialRootLogLevel, validatedLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) - assertEquals(initialRootLogLevel, validatedLoggerConfig.get("kafka.controller.KafkaController").value()) - assertEquals(initialRootLogLevel, validatedLoggerConfig.get("kafka.log.LogCleaner").value()) - assertEquals(initialRootLogLevel, validatedLoggerConfig.get("kafka.server.ReplicaManager").value()) - assertEquals(initialRootLogLevel, validatedLoggerConfig.get("kafka.zookeeper.ZooKeeperClient").value()) + assertEquals(initialKafkaLogLevel, validatedLoggerConfig.get("kafka").value()) + assertEquals(initialKafkaLogLevel, validatedLoggerConfig.get("kafka.controller.KafkaController").value()) + assertEquals(initialKafkaLogLevel, validatedLoggerConfig.get("kafka.log.LogCleaner").value()) + assertEquals(initialKafkaLogLevel, validatedLoggerConfig.get("kafka.server.ReplicaManager").value()) + assertEquals(initialKafkaLogLevel, validatedLoggerConfig.get("kafka.zookeeper.ZooKeeperClient").value()) // test that we can change them and unset loggers still use the root's log level alterBrokerLoggers(alterRootLoggerEntry) val changedRootLoggerConfig = describeBrokerLoggers() - assertEquals(newRootLogLevel, changedRootLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) + assertEquals(newRootLogLevel, changedRootLoggerConfig.get("kafka").value()) assertEquals(newRootLogLevel, changedRootLoggerConfig.get("kafka.controller.KafkaController").value()) assertEquals(newRootLogLevel, changedRootLoggerConfig.get("kafka.log.LogCleaner").value()) assertEquals(newRootLogLevel, 
changedRootLoggerConfig.get("kafka.server.ReplicaManager").value()) @@ -2121,7 +2130,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ).asJavaCollection alterBrokerLoggers(alterLogLevelsEntries) val alteredLoggerConfig = describeBrokerLoggers() - assertEquals(newRootLogLevel, alteredLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) + assertEquals(newRootLogLevel, alteredLoggerConfig.get("kafka").value()) assertEquals(LogLevelConfig.INFO_LOG_LEVEL, alteredLoggerConfig.get("kafka.controller.KafkaController").value()) assertEquals(LogLevelConfig.ERROR_LOG_LEVEL, alteredLoggerConfig.get("kafka.log.LogCleaner").value()) assertEquals(LogLevelConfig.TRACE_LOG_LEVEL, alteredLoggerConfig.get("kafka.server.ReplicaManager").value()) @@ -2133,10 +2142,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * 2. Change kafka.controller.KafkaController logger to INFO * 3. Unset kafka.controller.KafkaController via AlterConfigOp.OpType.DELETE (resets it to the root logger - TRACE) * 4. Change ROOT logger to ERROR - * 5. Ensure the kafka.controller.KafkaController logger's level is ERROR (the curent root logger level) + * 5. Ensure the kafka.controller.KafkaController logger's level is ERROR (the current root logger level) */ @Test - @Disabled // To be re-enabled once KAFKA-8779 is resolved def testIncrementalAlterConfigsForLog4jLogLevelsCanResetLoggerToCurrentRoot(): Unit = { client = Admin.create(createConfig) // step 1 - configure root logger @@ -2178,7 +2186,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } @Test - @Disabled // To be re-enabled once KAFKA-8779 is resolved def testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger(): Unit = { client = Admin.create(createConfig) val deleteRootLoggerEntry = Seq( @@ -2189,7 +2196,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } @Test - @Disabled // To be re-enabled once KAFKA-8779 is resolved def testIncrementalAlterConfigsForLog4jLogLevelsDoesNotWorkWithInvalidConfigs(): Unit = { client = Admin.create(createConfig) val validLoggerName = "kafka.server.KafkaRequestHandler" @@ -2213,18 +2219,32 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(subtractLogLevelEntries)).getCause.isInstanceOf[InvalidRequestException]) assertLogLevelDidNotChange() + /** + * TODO As of present, there are two ways of changing log levels dynamically: + * + * 1. Using JMX, call [[Log4jController.logLevel]] directly. + * 2. Using [[Admin]], call [[Admin#incrementalAlterConfigs]]. + * + * However, there are two problems with method 2: + * + * - Method 1 supports 8 Log4j levels; However, method 2 supports only 6 levels. (see [[LogLevelConfig.VALID_LOG_LEVELS]]) + * So, OFF and ALL are only supported in method 1. + * - If the user tries to call APPEND or SUBTRACT with method 2, [[InvalidRequestException]] is thrown; However, if the user + * specifies the wrong logger name or level, it throws [[InvalidConfigurationException]]. (see [[ConfigAdminManager#validateLogLevelConfigs]]) + * Is this consistent? 
+ */ val invalidLogLevelLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", "OFF"), AlterConfigOp.OpType.SET) // OFF is not a valid log level ).asJavaCollection - assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLogLevelLogLevelEntries)).getCause.isInstanceOf[InvalidRequestException]) + assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLogLevelLogLevelEntries)).getCause.isInstanceOf[InvalidConfigurationException]) assertLogLevelDidNotChange() val invalidLoggerNameLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("Some Other LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET) // invalid logger name is not supported ).asJavaCollection - assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLoggerNameLogLevelEntries)).getCause.isInstanceOf[InvalidRequestException]) + assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLoggerNameLogLevelEntries)).getCause.isInstanceOf[InvalidConfigurationException]) assertLogLevelDidNotChange() } @@ -2233,7 +2253,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { */ @nowarn("cat=deprecation") @Test - @Disabled // To be re-enabled once KAFKA-8779 is resolved def testAlterConfigsForLog4jLogLevelsDoesNotWork(): Unit = { client = Admin.create(createConfig) @@ -2257,28 +2276,6 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def describeBrokerLoggers(): Config = client.describeConfigs(Collections.singletonList(brokerLoggerConfigResource)).values.get(brokerLoggerConfigResource).get() - /** - * Due to the fact that log4j is not re-initialized across tests, changing a logger's log level persists across test classes. - * We need to clean up the changes done while testing. - */ - private def teardownBrokerLoggers(): Unit = { - if (changedBrokerLoggers.nonEmpty) { - val validLoggers = describeBrokerLoggers().entries().asScala.filterNot(_.name.equals(Log4jController.ROOT_LOGGER)).map(_.name).toSet - val unsetBrokerLoggersEntries = changedBrokerLoggers - .intersect(validLoggers) - .map { logger => new AlterConfigOp(new ConfigEntry(logger, ""), AlterConfigOp.OpType.DELETE) } - .asJavaCollection - - // ensure that we first reset the root logger to an arbitrary log level. Note that we cannot reset it to its original value - alterBrokerLoggers(List( - new AlterConfigOp(new ConfigEntry(Log4jController.ROOT_LOGGER, LogLevelConfig.FATAL_LOG_LEVEL), AlterConfigOp.OpType.SET) - ).asJavaCollection) - alterBrokerLoggers(unsetBrokerLoggersEntries) - - changedBrokerLoggers.clear() - } - } - /** * Test that createTopics returns the dynamic configurations of the topics that were created. 
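The re-enabled assertions above exercise the Admin-based path the TODO calls "method 2": levels are changed through `incrementalAlterConfigs` on a BROKER_LOGGER resource, and invalid levels or logger names now surface as InvalidConfigurationException. A hypothetical client-side sketch of that path (bootstrap address and broker id are placeholders, not taken from this patch):

    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AlterConfigOp;
    import org.apache.kafka.clients.admin.ConfigEntry;
    import org.apache.kafka.common.config.ConfigResource;

    public class BrokerLoggerLevelSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder address
            try (Admin admin = Admin.create(props)) {
                // BROKER_LOGGER resources are addressed by broker id.
                ConfigResource brokerLoggers =
                        new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");

                // Only SET and DELETE are accepted for broker loggers; APPEND and
                // SUBTRACT are rejected with InvalidRequestException (see above).
                AlterConfigOp setLevel = new AlterConfigOp(
                        new ConfigEntry("kafka.server.ReplicaManager", "DEBUG"),
                        AlterConfigOp.OpType.SET);

                admin.incrementalAlterConfigs(
                        Collections.singletonMap(brokerLoggers, Collections.singleton(setLevel)))
                        .all().get();
            }
        }
    }

Unlike the JMX path (`Log4jController.logLevel`), this route is limited to the six levels in `LogLevelConfig.VALID_LOG_LEVELS`, so OFF and ALL remain reachable only via JMX.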
* diff --git a/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala b/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala index 7cd5a18c22704..85e59877a4f69 100644 --- a/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala @@ -22,7 +22,7 @@ import javax.management.InstanceAlreadyExistsException import kafka.admin.AclCommand.AclCommandOptions import kafka.security.authorizer.{AclAuthorizer, AclEntry} import kafka.server.{KafkaConfig, KafkaServer} -import kafka.utils.{Exit, LogCaptureAppender, Logging, TestUtils} +import kafka.utils.{Exit, Logging, TestUtils} import kafka.server.QuorumTestHarness import org.apache.kafka.common.acl.{AccessControlEntry, AclOperation, AclPermissionType} import org.apache.kafka.common.acl.AclOperation._ @@ -34,10 +34,13 @@ import org.apache.kafka.common.resource.PatternType.{LITERAL, PREFIXED} import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.utils.{AppInfoParser, SecurityUtils} import org.apache.kafka.server.authorizer.Authorizer -import org.apache.log4j.Level +import org.apache.logging.log4j.Level + import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import unit.kafka.utils.LogCaptureContext + class AclCommandTest extends QuorumTestHarness with Logging { var servers: Seq[KafkaServer] = Seq() @@ -199,19 +202,18 @@ class AclCommandTest extends QuorumTestHarness with Logging { createServer(Some(adminClientConfig)) - val appender = LogCaptureAppender.createAndRegister() - val previousLevel = LogCaptureAppender.setClassLoggerLevel(classOf[AppInfoParser], Level.WARN) + val logCaptureContext = LogCaptureContext( + classOf[AppInfoParser].getName, Map(classOf[AppInfoParser].getName -> "WARN") + ) try { testAclCli(adminArgs) + val warning = logCaptureContext.getMessages.find(e => e.getLevel == Level.WARN && + e.getThrown != null && + e.getThrown.getClass.getName.equals(classOf[InstanceAlreadyExistsException].getName)) + assertFalse(warning.isDefined, "There should be no warnings about multiple registration of mbeans") } finally { - LogCaptureAppender.setClassLoggerLevel(classOf[AppInfoParser], previousLevel) - LogCaptureAppender.unregister(appender) + logCaptureContext.close } - val warning = appender.getMessages.find(e => e.getLevel == Level.WARN && - e.getThrowableInformation != null && - e.getThrowableInformation.getThrowable.getClass.getName == classOf[InstanceAlreadyExistsException].getName) - assertFalse(warning.isDefined, "There should be no warnings about multiple registration of mbeans") - } private def testProducerConsumerCli(cmdArgs: Array[String]): Unit = { diff --git a/core/src/test/scala/unit/kafka/controller/ControllerFailoverTest.scala b/core/src/test/scala/unit/kafka/controller/ControllerFailoverTest.scala index eecc616dc88c5..48b568729fc04 100644 --- a/core/src/test/scala/unit/kafka/controller/ControllerFailoverTest.scala +++ b/core/src/test/scala/unit/kafka/controller/ControllerFailoverTest.scala @@ -26,12 +26,10 @@ import kafka.server.KafkaConfig import kafka.utils._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.metrics.Metrics -import org.apache.log4j.Logger import org.junit.jupiter.api.{AfterEach, Test} import org.junit.jupiter.api.Assertions._ class ControllerFailoverTest extends KafkaServerTestHarness with Logging { - val log = Logger.getLogger(classOf[ControllerFailoverTest]) val numNodes = 2 val numParts = 1 val 
msgQueueSize = 1 diff --git a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala index aa631c95e466f..21607b8811c17 100644 --- a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala @@ -24,7 +24,7 @@ import com.yammer.metrics.core.Timer import kafka.api.{ApiVersion, KAFKA_2_6_IV0, KAFKA_2_7_IV0, LeaderAndIsr} import kafka.controller.KafkaController.AlterPartitionCallback import kafka.server.{KafkaConfig, KafkaServer, QuorumTestHarness} -import kafka.utils.{LogCaptureAppender, TestUtils} +import kafka.utils.TestUtils import kafka.zk.{FeatureZNodeStatus, _} import org.apache.kafka.common.errors.{ControllerMovedException, StaleBrokerEpochException} import org.apache.kafka.common.feature.Features @@ -33,11 +33,12 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.{ElectionType, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.log4j.Level +import org.apache.logging.log4j.Level import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import org.mockito.Mockito.{doAnswer, spy, verify} import org.mockito.invocation.InvocationOnMock +import unit.kafka.utils.LogCaptureContext import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ @@ -589,7 +590,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { testControllerMove(() => { val adminZkClient = new AdminZkClient(zkClient) adminZkClient.createTopicWithAssignment(tp.topic, config = new Properties(), assignment) - }) + }, s"classOf[ControllerIntegrationTest]#testControllerMoveOnTopicCreation") } @Test @@ -603,7 +604,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { testControllerMove(() => { val adminZkClient = new AdminZkClient(zkClient) adminZkClient.deleteTopic(tp.topic()) - }) + }, s"classOf[ControllerIntegrationTest]#testControllerMoveOnTopicDeletion") } @Test @@ -613,7 +614,9 @@ class ControllerIntegrationTest extends QuorumTestHarness { val assignment = Map(tp.partition -> Seq(0)) TestUtils.createTopic(zkClient, tp.topic(), assignment, servers) - testControllerMove(() => zkClient.createPreferredReplicaElection(Set(tp))) + testControllerMove( + () => zkClient.createPreferredReplicaElection(Set(tp)), + s"classOf[ControllerIntegrationTest]#testControllerMoveOnPreferredReplicaElection") } @Test @@ -625,7 +628,9 @@ class ControllerIntegrationTest extends QuorumTestHarness { TestUtils.createTopic(zkClient, tp.topic(), assignment, servers) val reassignment = Map(tp -> Seq(0)) - testControllerMove(() => zkClient.createPartitionReassignment(reassignment)) + testControllerMove( + () => zkClient.createPartitionReassignment(reassignment), + s"classOf[ControllerIntegrationTest]#testControllerMoveOnPartitionReassignment") } @Test @@ -1523,10 +1528,10 @@ class ControllerIntegrationTest extends QuorumTestHarness { assertTrue(servers.head.kafkaController.controllerContext.topicNames.get(topicIdAfterUpgrade.get).isEmpty) } - private def testControllerMove(fun: () => Unit): Unit = { + private def testControllerMove(fun: () => Unit, contextName: String): Unit = { val controller = getController().kafkaController - val appender = LogCaptureAppender.createAndRegister() - val previousLevel = 
LogCaptureAppender.setClassLoggerLevel(controller.getClass, Level.INFO) + val logCaptureContext = LogCaptureContext(contextName, scala.Predef.Map(classOf[KafkaController].getName -> "INFO")) + logCaptureContext.setLatch(1) try { TestUtils.waitUntilTrue(() => { @@ -1554,14 +1559,13 @@ class ControllerIntegrationTest extends QuorumTestHarness { TestUtils.waitUntilTrue(() => !controller.isActive, "Controller fails to resign") // Expect to capture the ControllerMovedException in the log of ControllerEventThread - val event = appender.getMessages.find(e => e.getLevel == Level.INFO - && e.getThrowableInformation != null - && e.getThrowableInformation.getThrowable.getClass.getName.equals(classOf[ControllerMovedException].getName)) + logCaptureContext.await(30, TimeUnit.SECONDS) + val event = logCaptureContext.getMessages.find(e => e.getLevel == Level.INFO + && e.getThrown != null + && e.getThrown.getClass.getName.equals(classOf[ControllerMovedException].getName)) assertTrue(event.isDefined) - } finally { - LogCaptureAppender.unregister(appender) - LogCaptureAppender.setClassLoggerLevel(controller.eventManager.thread.getClass, previousLevel) + logCaptureContext.close } } diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index adab6bc88a946..87c251f8ab5e7 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -23,7 +23,6 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import scala.util.Random import scala.jdk.CollectionConverters._ import scala.collection.{Map, Seq} -import org.apache.log4j.{Level, Logger} import java.util.Properties import java.util.concurrent.ExecutionException @@ -35,7 +34,9 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.TimeoutException import org.apache.kafka.common.serialization.StringDeserializer import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigsResult, Config, ConfigEntry} +import org.apache.logging.log4j.Level import org.junit.jupiter.api.Assertions._ +import unit.kafka.utils.LoggingUtil import scala.annotation.nowarn @@ -57,9 +58,6 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { val topic = "topic" + random.nextLong() val partitionId = 0 - val kafkaApisLogger = Logger.getLogger(classOf[kafka.server.KafkaApis]) - val networkProcessorLogger = Logger.getLogger(classOf[kafka.network.Processor]) - @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) @@ -74,8 +72,8 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { } // temporarily set loggers to a higher level so that tests run quietly - kafkaApisLogger.setLevel(Level.FATAL) - networkProcessorLogger.setLevel(Level.FATAL) + LoggingUtil.setLevel("kafka.server.KafkaApis", Level.FATAL) + LoggingUtil.setLevel("kafka.network.Processor", Level.FATAL) } @AfterEach @@ -84,8 +82,8 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { servers.foreach(server => CoreUtils.delete(server.config.logDirs)) // restore log levels - kafkaApisLogger.setLevel(Level.ERROR) - networkProcessorLogger.setLevel(Level.ERROR) + LoggingUtil.setLevel("kafka.server.KafkaApis", Level.ERROR) + LoggingUtil.setLevel("kafka.network.Processor", Level.ERROR) super.tearDown() } diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala 
b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index 9b4e81ab39ebe..be7fd416a3a9a 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -47,9 +47,11 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.utils.{AppInfoParser, LogContext, MockTime, Time, Utils} import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils} -import org.apache.log4j.Level +import org.apache.logging.log4j.Level import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api._ +import unit.kafka.utils.LoggingUtil + import java.util.concurrent.atomic.AtomicInteger import org.apache.kafka.server.metrics.KafkaYammerMetrics @@ -82,7 +84,6 @@ class SocketServerTest { server.startup() val sockets = new ArrayBuffer[Socket] - private val kafkaLogger = org.apache.log4j.LogManager.getLogger("kafka") private var logLevelToRestore: Level = _ def endpoint: EndPoint = { KafkaConfig.fromProps(props, doLog = false).dataPlaneListeners.head @@ -93,9 +94,7 @@ class SocketServerTest { @BeforeEach def setUp(): Unit = { // Run the tests with TRACE logging to exercise request logging path - logLevelToRestore = kafkaLogger.getLevel - kafkaLogger.setLevel(Level.TRACE) - + logLevelToRestore = LoggingUtil.setLevel("kafka", Level.TRACE) assertTrue(server.controlPlaneRequestChannelOpt.isEmpty) } @@ -104,7 +103,7 @@ class SocketServerTest { shutdownServerAndMetrics(server) sockets.foreach(_.close()) sockets.clear() - kafkaLogger.setLevel(logLevelToRestore) + LoggingUtil.setLevel("kafka", logLevelToRestore) } def sendRequest(socket: Socket, request: Array[Byte], id: Option[Short] = None, flush: Boolean = true): Unit = { diff --git a/core/src/test/scala/unit/kafka/utils/LogCaptureAppender.scala b/core/src/test/scala/unit/kafka/utils/LogCaptureAppender.scala deleted file mode 100644 index 2d071452829ff..0000000000000 --- a/core/src/test/scala/unit/kafka/utils/LogCaptureAppender.scala +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.utils - -import org.apache.log4j.{AppenderSkeleton, Level, Logger} -import org.apache.log4j.spi.LoggingEvent - -import scala.collection.mutable.ListBuffer - -class LogCaptureAppender extends AppenderSkeleton { - private val events: ListBuffer[LoggingEvent] = ListBuffer.empty - - override protected def append(event: LoggingEvent): Unit = { - events.synchronized { - events += event - } - } - - def getMessages: ListBuffer[LoggingEvent] = { - events.synchronized { - return events.clone() - } - } - - override def close(): Unit = { - events.synchronized { - events.clear() - } - } - - override def requiresLayout: Boolean = false -} - -object LogCaptureAppender { - def createAndRegister(): LogCaptureAppender = { - val logCaptureAppender: LogCaptureAppender = new LogCaptureAppender - Logger.getRootLogger.addAppender(logCaptureAppender) - logCaptureAppender - } - - def setClassLoggerLevel(clazz: Class[_], logLevel: Level): Level = { - val logger = Logger.getLogger(clazz) - val previousLevel = logger.getLevel - Logger.getLogger(clazz).setLevel(logLevel) - previousLevel - } - - def unregister(logCaptureAppender: LogCaptureAppender): Unit = { - Logger.getRootLogger.removeAppender(logCaptureAppender) - } -} diff --git a/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala new file mode 100644 index 0000000000000..f66588ea3e3a4 --- /dev/null +++ b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package unit.kafka.utils + +import java.util.concurrent.{CountDownLatch, TimeUnit} + +import org.apache.logging.log4j.Level +import org.apache.logging.log4j.core.{LogEvent, LoggerContext} +import org.apache.logging.log4j.test.appender.ListAppender + +import scala.jdk.CollectionConverters._ + +class LogCaptureContext(listAppender: ListAppender, prevLevelMap: Map[String, Level]) extends AutoCloseable { + + def setLatch(size: Int): Unit = { + this.listAppender.countDownLatch = new CountDownLatch(size) + } + + @throws[InterruptedException] + def await(l: Long, timeUnit: TimeUnit): Unit = { + this.listAppender.countDownLatch.await(l, timeUnit) + } + + def getMessages: Seq[LogEvent] = listAppender.getEvents.asScala.toSeq + + override def close(): Unit = { + val loggerContext = LoggerContext.getContext(false) + loggerContext.getRootLogger.removeAppender(listAppender) + listAppender.stop() + + // Restore previous logger levels + prevLevelMap.foreach { e => + val loggerName = e._1 + val level = e._2 + loggerContext.getLogger(loggerName).setLevel(level) + } + } +} + +object LogCaptureContext { + def apply(name: String, levelMap: Map[String, String] = Map()): LogCaptureContext = { + val loggerContext = LoggerContext.getContext(false) + val listAppender = ListAppender.createAppender(name, + false, false, null, null) + listAppender.start + loggerContext.getConfiguration.addAppender(listAppender) + loggerContext.getRootLogger.addAppender(listAppender) + + // Store the previous logger levels + val preLevelMap = levelMap.keys.map { loggerName => + (loggerName, loggerContext.getLogger(loggerName).getLevel) + }.toMap + + // Change the logger levels + levelMap.foreach { e => + val loggerName = e._1 + val level = e._2 + loggerContext.getLogger(loggerName).setLevel(Level.getLevel(level)) + } + + new LogCaptureContext(listAppender, preLevelMap) + } +} diff --git a/core/src/test/scala/unit/kafka/utils/LoggingUtil.scala b/core/src/test/scala/unit/kafka/utils/LoggingUtil.scala new file mode 100644 index 0000000000000..1ee1cd7b0eb14 --- /dev/null +++ b/core/src/test/scala/unit/kafka/utils/LoggingUtil.scala @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package unit.kafka.utils + +import org.apache.logging.log4j.core.LoggerContext +import org.apache.logging.log4j.{Level, LogManager} + +object LoggingUtil { + def setLevel(logger: String, level: Level): Level = { + val loggerContext = LogManager.getContext(false).asInstanceOf[LoggerContext] + val kafkaLoggerConfig = loggerContext.getConfiguration.getLoggerConfig(logger) + val prevLevel = kafkaLoggerConfig.getLevel + kafkaLoggerConfig.setLevel(level) + loggerContext.updateLoggers + prevLevel + } +} diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle index 066d62bc854fe..8201ff5ad9704 100644 --- a/gradle/dependencies.gradle +++ b/gradle/dependencies.gradle @@ -75,6 +75,7 @@ versions += [ jline: "3.21.0", jmh: "1.35", hamcrest: "2.2", + log4j2: "2.14.1", scalaLogging: "3.9.4", jaxb: "2.3.0", jaxrs: "2.1.1", @@ -179,6 +180,10 @@ libs += [ kafkaStreams_30: "org.apache.kafka:kafka-streams:$versions.kafka_30", kafkaStreams_31: "org.apache.kafka:kafka-streams:$versions.kafka_31", log4j: "ch.qos.reload4j:reload4j:$versions.reload4j", + log4j1_2Api: "org.apache.logging.log4j:log4j-1.2-api:$versions.log4j2", + log4j2Api: "org.apache.logging.log4j:log4j-api:$versions.log4j2", + log4j2Core: "org.apache.logging.log4j:log4j-core:$versions.log4j2", + log4j2CoreTest: "org.apache.logging.log4j:log4j-core:$versions.log4j2:tests", lz4: "org.lz4:lz4-java:$versions.lz4", metrics: "com.yammer.metrics:metrics-core:$versions.metrics", dropwizardMetrics: "io.dropwizard.metrics:metrics-core:$versions.dropwizardMetrics", @@ -198,6 +203,7 @@ libs += [ scalaReflect: "org.scala-lang:scala-reflect:$versions.scala", slf4jApi: "org.slf4j:slf4j-api:$versions.slf4j", slf4jlog4j: "org.slf4j:slf4j-log4j12:$versions.slf4j", + slf4jlog4j2: "org.apache.logging.log4j:log4j-slf4j-impl:$versions.log4j2", snappy: "org.xerial.snappy:snappy-java:$versions.snappy", zookeeper: "org.apache.zookeeper:zookeeper:$versions.zookeeper", jfreechart: "jfreechart:jfreechart:$versions.jfreechart", diff --git a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java index 90a791f7f30cb..f49188012b790 100644 --- a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java +++ b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java @@ -35,6 +35,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeoutException; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; @@ -149,7 +150,10 @@ public void testRealProducerConfigWithSyncSendShouldNotThrowException() { Properties props = getLog4jConfigWithRealProducer(true); PropertyConfigurator.configure(props); - logger.error(getMessage(0)); + assertDoesNotThrow(() -> logger.error(getMessage(0))); + + // close the appender. If this process is omitted, `PropertyConfigurator#configure` call may hang up. + Logger.getRootLogger().getAppender("KAFKA").close(); } @Test @@ -158,6 +162,9 @@ public void testRealProducerConfigWithSyncSendAndNotIgnoringExceptionsShouldThro PropertyConfigurator.configure(props); assertThrows(RuntimeException.class, () -> logger.error(getMessage(0))); + + // close the appender. If this process is omitted, `PropertyConfigurator#configure` call may hang up. 
+ Logger.getRootLogger().getAppender("KAFKA").close(); } private void replaceProducerWithMocked(MockKafkaLog4jAppender mockKafkaLog4jAppender, boolean success) { @@ -214,4 +221,3 @@ private Properties getLog4jConfig(boolean syncSend) { return props; } } - diff --git a/tests/kafkatest/services/templates/connect_log4j.properties b/metadata/src/test/resources/log4j2.properties similarity index 60% rename from tests/kafkatest/services/templates/connect_log4j.properties rename to metadata/src/test/resources/log4j2.properties index 4894612c43fa0..e7adca212271e 100644 --- a/tests/kafkatest/services/templates/connect_log4j.properties +++ b/metadata/src/test/resources/log4j2.properties @@ -1,4 +1,3 @@ -## # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -13,17 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -## +name=TestConfig +appenders=stdout -# Define the root logger with appender file -log4j.rootLogger = {{ log_level|default("INFO") }}, FILE +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout +appender.stdout.layout.pattern=[%d] %p %m (%c:%L)%n -log4j.appender.FILE=org.apache.log4j.FileAppender -log4j.appender.FILE.File={{ log_file }} -log4j.appender.FILE.ImmediateFlush=true -log4j.appender.FILE.Append=true -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.conversionPattern=[%d] %p %m (%c)%n +rootLogger.level=DEBUG +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.reflections=ERROR +loggers=org.apache.kafka,org.apache.zookeeper + +logger.org.apache.kafka.name=org.apache.kafka +logger.org.apache.kafka.level=DEBUG + +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=WARN diff --git a/raft/bin/test-kraft-server-start.sh b/raft/bin/test-kraft-server-start.sh index 701bc1864a458..2eefdd4912a6e 100755 --- a/raft/bin/test-kraft-server-start.sh +++ b/raft/bin/test-kraft-server-start.sh @@ -17,6 +17,7 @@ base_dir=$(dirname $0) if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + echo "DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:$base_dir/../config/kraft-log4j2.properties\"'" export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/kraft-log4j.properties" fi diff --git a/core/src/test/resources/log4j.properties b/raft/config/kraft-log4j2.properties similarity index 66% rename from core/src/test/resources/log4j.properties rename to raft/config/kraft-log4j2.properties index f7fb7364a3c38..9636f9c4a94b2 100644 --- a/core/src/test/resources/log4j.properties +++ b/raft/config/kraft-log4j2.properties @@ -12,15 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-log4j.rootLogger=OFF, stdout +name=RaftConfig +appenders=stderr -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +appender.stderr.type=Console +appender.stderr.name=STDERR +appender.stderr.layout.type=PatternLayout +appender.stderr.layout.pattern=[%d] %p %m (%c)%n -log4j.logger.kafka=WARN -log4j.logger.org.apache.kafka=WARN +rootLogger.level=INFO +rootLogger.appenderRefs=stderr +rootLogger.appenderRef.stderr.ref=STDERR +loggers=org.apache.kafka.raft -# zkclient can be verbose, during debugging it is common to adjust it separately -log4j.logger.org.apache.zookeeper=WARN +logger.org.apache.kafka.raft.name=org.apache.kafka.raft +logger.org.apache.kafka.raft.level=INFO diff --git a/storage/src/test/resources/log4j.properties b/storage/src/test/resources/log4j2.properties similarity index 52% rename from storage/src/test/resources/log4j.properties rename to storage/src/test/resources/log4j2.properties index 113e15e22f633..d754ea0307655 100644 --- a/storage/src/test/resources/log4j.properties +++ b/storage/src/test/resources/log4j2.properties @@ -12,11 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -log4j.rootLogger=OFF, stdout +name=TestConfig +appenders=stdout -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout +appender.stdout.layout.pattern=[%d] %p %m (%c:%L)%n -log4j.logger.org.apache.kafka.server.log.remote.storage=INFO -log4j.logger.org.apache.kafka.server.log.remote.metadata.storage=INFO +rootLogger.level=OFF +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +loggers=org.apache.kafka.server.log.remote.metadata.storage,org.apache.kafka.server.log.remote.storage + +logger.org.apache.kafka.server.log.remote.metadata.storage.name=org.apache.kafka.server.log.remote.metadata.storage +logger.org.apache.kafka.server.log.remote.metadata.storage.level=INFO + +logger.org.apache.kafka.server.log.remote.storage.name=org.apache.kafka.server.log.remote.storage +logger.org.apache.kafka.server.log.remote.storage.level=INFO diff --git a/clients/src/test/resources/log4j.properties b/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.properties similarity index 72% rename from clients/src/test/resources/log4j.properties rename to streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.properties index b1d5b7f2b4091..79fbb7d4901d4 100644 --- a/clients/src/test/resources/log4j.properties +++ b/streams/quickstart/java/src/main/resources/archetype-resources/src/main/resources/log4j2.properties @@ -12,10 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
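The renamed test configurations above all apply the same translation: `log4j.rootLogger=LEVEL, NAME` becomes `rootLogger.level` plus an `appenderRef`, each `log4j.appender.X.*` block becomes a named `appender.x.*` block, and each `log4j.logger.N=LEVEL` becomes a `logger.<id>.name`/`logger.<id>.level` pair listed under `loggers`. As a rough illustration of the structure those keys describe (not part of the patch; the logger name is borrowed from the storage config above), the same shape expressed through log4j2's ConfigurationBuilder API:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class PropertiesShapeSketch {
        public static void main(String[] args) {
            ConfigurationBuilder<BuiltConfiguration> builder =
                    ConfigurationBuilderFactory.newConfigurationBuilder();
            builder.setConfigurationName("TestConfig");          // name=TestConfig

            // appender.stdout.* : Console appender named STDOUT with a PatternLayout
            builder.add(builder.newAppender("STDOUT", "Console")
                    .add(builder.newLayout("PatternLayout")
                            .addAttribute("pattern", "[%d] %p %m (%c:%L)%n")));

            // rootLogger.* : a level plus an appender reference
            builder.add(builder.newRootLogger(Level.OFF)
                    .add(builder.newAppenderRef("STDOUT")));

            // logger.<id>.* : a named logger with its own level
            builder.add(builder.newLogger("org.apache.kafka.server.log.remote.storage", Level.INFO));

            Configurator.initialize(builder.build());
        }
    }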
-log4j.rootLogger=OFF, stdout +name=StreamsConfig +appenders=console -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n -log4j.logger.org.apache.kafka=ERROR +rootLogger.level=INFO +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java index eabb8edc1147c..da141fa17ed37 100644 --- a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java @@ -54,11 +54,11 @@ import org.apache.kafka.streams.processor.internals.TopologyMetadata; import org.apache.kafka.streams.processor.internals.ThreadMetadataImpl; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.internals.metrics.RocksDBMetricsRecordingTrigger; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockClientSupplier; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.MockProcessorSupplier; @@ -476,7 +476,8 @@ public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception final StreamsBuilder builder = getBuilderWithSource(); builder.globalTable("anyTopic"); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldCleanupResourcesOnCloseWithoutPreviousStart"); final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) { streams.close(); @@ -484,7 +485,7 @@ public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception () -> streams.state() == KafkaStreams.State.NOT_RUNNING, "Streams never stopped."); - assertThat(appender.getMessages(), not(hasItem(containsString("ERROR")))); + assertThat(logCaptureContext.getMessages(), not(hasItem(containsString("ERROR")))); } assertTrue(supplier.consumer.closed()); @@ -536,8 +537,9 @@ public void testStateGlobalThreadClose() throws Exception { final StreamsBuilder builder = getBuilderWithSource(); builder.globalTable("anyTopic"); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class); - final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#testStateGlobalThreadClose"); + final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) { streams.start(); waitForCondition( () -> streams.state() == KafkaStreams.State.RUNNING, @@ -562,7 +564,7 @@ public void testStateGlobalThreadClose() throws Exception { "Thread never stopped." 
); - assertThat(appender.getMessages(), hasItem(containsString("ERROR"))); + assertThat(logCaptureContext.getMessages(), hasItem(containsString("ERROR"))); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java index 4deed5ec9032e..c4998ba98ec2f 100644 --- a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java @@ -32,7 +32,7 @@ import org.apache.kafka.streams.processor.FailOnInvalidTimestamp; import org.apache.kafka.streams.processor.TimestampExtractor; import org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.junit.Before; import org.junit.Test; @@ -40,6 +40,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -66,10 +67,10 @@ import static org.apache.kafka.streams.internals.StreamsConfigUtils.getTotalCacheSize; import static org.apache.kafka.test.StreamsTestUtils.getStreamsConfig; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.core.IsEqual.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; @@ -80,7 +81,6 @@ public class StreamsConfigTest { private final Properties props = new Properties(); - private StreamsConfig streamsConfig; private final String groupId = "example-application"; private final String clientId = "client"; @@ -94,7 +94,6 @@ public void setUp() { props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); props.put("key.deserializer.encoding", StandardCharsets.UTF_8.name()); props.put("value.deserializer.encoding", StandardCharsets.UTF_16.name()); - streamsConfig = new StreamsConfig(props); } @Test @@ -136,6 +135,7 @@ public void shouldThrowExceptionIfBootstrapServersIsNotSet() { @Test public void testGetProducerConfigs() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map returnedProps = streamsConfig.getProducerConfigs(clientId); assertThat(returnedProps.get(ProducerConfig.CLIENT_ID_CONFIG), equalTo(clientId)); assertThat(returnedProps.get(ProducerConfig.LINGER_MS_CONFIG), equalTo("100")); @@ -143,6 +143,7 @@ public void testGetProducerConfigs() { @Test public void testGetConsumerConfigs() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map returnedProps = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx); assertThat(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), equalTo(clientId)); assertThat(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG), equalTo(groupId)); @@ -211,6 +212,7 @@ public void testGetMainConsumerConfigsWithMainConsumerOverridenPrefix() { @Test public void testGetRestoreConsumerConfigs() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map returnedProps = streamsConfig.getRestoreConsumerConfigs(clientId); assertEquals(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), clientId); 
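The streams test changes above swap `LogCaptureAppender` for `org.apache.kafka.test.LogCaptureContext`, a class introduced by this patch. The sketch below only mirrors the call sites visible in these diffs rather than an independent API reference: create a context keyed by a unique name, optionally pass a map of logger names to temporary levels, set a latch for the expected number of events, run the code under test, `await()` the latch, then assert on `getMessages()`; closing the context (here via try-with-resources) is expected to restore the previous levels, as the Scala LogCaptureContext in this patch does.

    import java.util.Collections;

    import org.apache.kafka.test.LogCaptureContext;

    import static org.hamcrest.CoreMatchers.containsString;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.hasItem;

    public class LogCaptureSketch {

        public void capturePattern() throws InterruptedException {
            try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(
                    getClass().getName() + "#capturePattern",
                    Collections.singletonMap("com.example.SomeComponent", "DEBUG"))) { // hypothetical logger name
                logCaptureContext.setLatch(1);   // expect one captured event

                // ... run the code that is expected to log a warning here ...

                logCaptureContext.await();       // block until the event arrives
                assertThat(logCaptureContext.getMessages(),
                        hasItem(containsString("WARN "))); // captured messages carry the level prefix
            }
        }
    }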
assertNull(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG)); @@ -218,6 +220,7 @@ public void testGetRestoreConsumerConfigs() { @Test public void defaultSerdeShouldBeConfigured() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map serializerConfigs = new HashMap<>(); serializerConfigs.put("key.serializer.encoding", StandardCharsets.UTF_8.name()); serializerConfigs.put("value.serializer.encoding", StandardCharsets.UTF_16.name()); @@ -468,6 +471,7 @@ public void testGetRestoreConsumerConfigsWithRestoreConsumerOverridenPrefix() { @Test public void testGetGlobalConsumerConfigs() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map returnedProps = streamsConfig.getGlobalConsumerConfigs(clientId); assertEquals(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), clientId + "-global-consumer"); assertNull(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG)); @@ -523,6 +527,7 @@ public void shouldSetInternalLeaveGroupOnCloseConfigToFalseInConsumer() { @Test public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalseInConsumerForEosDisabled() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx); assertThat(consumerConfigs.get("internal.throw.on.fetch.stable.offset.unsupported"), is(nullValue())); } @@ -555,6 +560,7 @@ public void shouldNotSetInternalThrowOnFetchStableOffsetUnsupportedConfigToFalse @Test public void shouldNotSetInternalAutoDowngradeTxnCommitToTrueInProducerForEosDisabled() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final Map producerConfigs = streamsConfig.getProducerConfigs(clientId); assertThat(producerConfigs.get("internal.auto.downgrade.txn.commit"), is(nullValue())); } @@ -1002,6 +1008,7 @@ private void shouldThrowConfigExceptionIfMaxInFlightRequestsPerConnectionIsInval @Test public void shouldStateDirStartsWithJavaIOTmpDir() { + final StreamsConfig streamsConfig = new StreamsConfig(props); final String expectedPrefix = System.getProperty("java.io.tmpdir") + File.separator; final String actual = streamsConfig.getString(STATE_DIR_CONFIG); assertTrue(actual.startsWith(expectedPrefix)); @@ -1009,8 +1016,9 @@ public void shouldStateDirStartsWithJavaIOTmpDir() { @Test public void shouldSpecifyNoOptimizationWhenNotExplicitlyAddedToConfigs() { - final String expectedOptimizeConfig = "none"; + final StreamsConfig streamsConfig = new StreamsConfig(props); final String actualOptimizedConifig = streamsConfig.getString(TOPOLOGY_OPTIMIZATION_CONFIG); + final String expectedOptimizeConfig = "none"; assertEquals("Optimization should be \"none\"", expectedOptimizeConfig, actualOptimizedConifig); } @@ -1032,7 +1040,8 @@ public void shouldThrowConfigExceptionWhenOptimizationConfigNotValueInRange() { @Test public void shouldSpecifyRocksdbWhenNotExplicitlyAddedToConfigs() { final String expectedDefaultStoreType = StreamsConfig.ROCKS_DB; - final String actualDefaultStoreType = streamsConfig.getString(DEFAULT_DSL_STORE_CONFIG); + final StreamsConfig config = new StreamsConfig(props); + final String actualDefaultStoreType = config.getString(DEFAULT_DSL_STORE_CONFIG); assertEquals("default.dsl.store should be \"rocksDB\"", expectedDefaultStoreType, actualDefaultStoreType); } @@ -1053,55 +1062,65 @@ public void shouldThrowConfigExceptionWhenStoreTypeConfigNotValueInRange() { @SuppressWarnings("deprecation") @Test - public void shouldLogWarningWhenEosAlphaIsUsed() { - 
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE); + public void shouldLogWarningWhenEosAlphaIsUsed() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldLogWarningWhenEosAlphaIsUsed", + Collections.singletonMap(StreamsConfig.class.getName(), "DEBUG") + )) { + logCaptureContext.setLatch(3); - LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) { + props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE); new StreamsConfig(props); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("Configuration parameter `" + StreamsConfig.EXACTLY_ONCE + - "` is deprecated and will be removed in the 4.0.0 release. " + - "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead. " + - "Note that this requires broker version 2.5+ so you should prepare " + - "to upgrade your brokers if necessary.") + logCaptureContext.getMessages(), + hasItem("WARN Configuration parameter `" + StreamsConfig.EXACTLY_ONCE + + "` is deprecated and will be removed in the 4.0.0 release. " + + "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead. " + + "Note that this requires broker version 2.5+ so you should prepare " + + "to upgrade your brokers if necessary. ") ); } } @SuppressWarnings("deprecation") @Test - public void shouldLogWarningWhenEosBetaIsUsed() { - props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA); + public void shouldLogWarningWhenEosBetaIsUsed() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldLogWarningWhenEosBetaIsUsed", + Collections.singletonMap(StreamsConfig.class.getName(), "DEBUG") + )) { + logCaptureContext.setLatch(3); - LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) { + props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA); new StreamsConfig(props); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("Configuration parameter `" + StreamsConfig.EXACTLY_ONCE_BETA + - "` is deprecated and will be removed in the 4.0.0 release. " + - "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead.") + logCaptureContext.getMessages(), + hasItem("WARN Configuration parameter `" + StreamsConfig.EXACTLY_ONCE_BETA + + "` is deprecated and will be removed in the 4.0.0 release. " + + "Please use `" + StreamsConfig.EXACTLY_ONCE_V2 + "` instead. 
") ); } } @SuppressWarnings("deprecation") @Test - public void shouldLogWarningWhenRetriesIsUsed() { - props.put(StreamsConfig.RETRIES_CONFIG, 0); + public void shouldLogWarningWhenRetriesIsUsed() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogWarningWhenRetriesIsUsed")) { + logCaptureContext.setLatch(1); - LogCaptureAppender.setClassLoggerToDebug(StreamsConfig.class); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StreamsConfig.class)) { + props.put(StreamsConfig.RETRIES_CONFIG, 0); new StreamsConfig(props); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("Configuration parameter `" + StreamsConfig.RETRIES_CONFIG + - "` is deprecated and will be removed in the 4.0.0 release.") + logCaptureContext.getMessages(), + hasItem("WARN Configuration parameter `" + StreamsConfig.RETRIES_CONFIG + + "` is deprecated and will be removed in the 4.0.0 release. ") ); } } @@ -1285,7 +1304,7 @@ public void shouldUseDefaultStateStoreCacheMaxBytesConfigWhenNoConfigIsSet() { static class MisconfiguredSerde implements Serde { @Override - public void configure(final Map configs, final boolean isKey) { + public void configure(final Map configs, final boolean isKey) { throw new RuntimeException("boom"); } diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java index f996238fe1d66..747afaeeb3221 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java @@ -31,8 +31,8 @@ import org.apache.kafka.streams.kstream.Transformer; import org.apache.kafka.streams.processor.ProcessorContext; import org.apache.kafka.streams.processor.PunctuationType; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.test.IntegrationTest; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.TestUtils; import java.util.concurrent.atomic.AtomicBoolean; @@ -63,7 +63,10 @@ import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName; import static org.apache.kafka.test.TestUtils.waitForCondition; +import static org.hamcrest.CoreMatchers.both; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; @@ -71,7 +74,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; @Category(IntegrationTest.class) public class AdjustStreamThreadCountTest { @@ -377,22 +379,22 @@ public void shouldResizeCacheAfterThreadRemovalTimesOut() throws InterruptedExce props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); props.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, totalCacheBytes); - try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldResizeCacheAfterThreadRemovalTimesOut"); + final KafkaStreams 
kafkaStreams = new KafkaStreams(builder.build(), props)) { + logCaptureContext.setLatch(20); + addStreamStateChangeListener(kafkaStreams); startStreamsAndWaitForRunning(kafkaStreams); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class)) { - assertThrows(TimeoutException.class, () -> kafkaStreams.removeStreamThread(Duration.ofSeconds(0))); + assertThrows(TimeoutException.class, () -> kafkaStreams.removeStreamThread(Duration.ofSeconds(0))); - for (final String log : appender.getMessages()) { - // all 10 bytes should be available for remaining thread - if (log.contains("Resizing thread cache/max buffer size due to removal of thread ") && log.contains(", new cache size/max buffer size per thread is 10/536870912")) { - return; - } - } - } + logCaptureContext.await(); + // all 10 bytes should be available for remaining thread + assertThat(logCaptureContext.getMessages(), + hasItems(both(containsString("Resizing thread cache/max buffer size due to removal of thread ")) + .and(containsString(", new cache size/max buffer size per thread is 10/536870912")))); } - fail(); } @Test @@ -403,21 +405,21 @@ public void shouldResizeMaxBufferAfterThreadRemovalTimesOut() throws Interrupted props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); props.put(StreamsConfig.INPUT_BUFFER_MAX_BYTES_CONFIG, maxBufferBytes); - try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldResizeMaxBufferAfterThreadRemovalTimesOut"); + final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { + logCaptureContext.setLatch(20); addStreamStateChangeListener(kafkaStreams); startStreamsAndWaitForRunning(kafkaStreams); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KafkaStreams.class)) { - assertThrows(TimeoutException.class, () -> kafkaStreams.removeStreamThread(Duration.ofSeconds(0))); - for (final String log : appender.getMessages()) { - // all 10 bytes should be available for remaining thread - if (log.contains("Resizing thread cache/max buffer size due to removal of thread ") && log.contains(", new cache size/max buffer size per thread is 10485760/10")) { - return; - } - } - } + assertThrows(TimeoutException.class, () -> kafkaStreams.removeStreamThread(Duration.ofSeconds(0))); + + logCaptureContext.await(); + // all 10 bytes should be available for remaining thread + assertThat(logCaptureContext.getMessages(), + hasItems(both(containsString("Resizing thread cache/max buffer size due to removal of thread ")) + .and(containsString(", new cache size/max buffer size per thread is 10485760/10")))); } - fail(); } @Test @@ -436,47 +438,100 @@ public void shouldResizeCacheAndInputBufferAfterThreadReplacement() throws Inter final StreamsBuilder builder = new StreamsBuilder(); final KStream stream = builder.stream(inputTopic); stream.transform(() -> new Transformer>() { - @Override - public void init(final ProcessorContext context) { - context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { - if (Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) { - injectError.set(false); - throw new RuntimeException("BOOM"); - } - }); - } + @Override + public void init(final ProcessorContext context) { + context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { + if 
(Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) { + injectError.set(false); + throw new RuntimeException("BOOM"); + } + }); + } + + @Override + public KeyValue transform(final String key, final String value) { + return new KeyValue<>(key, value); + } - @Override - public KeyValue transform(final String key, final String value) { - return new KeyValue<>(key, value); - } + @Override + public void close() { + } + }); - @Override - public void close() { - } - }); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldResizeCacheAndInputBufferAfterThreadReplacement"); + final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { + logCaptureContext.setLatch(20); - try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { addStreamStateChangeListener(kafkaStreams); kafkaStreams.setUncaughtExceptionHandler(e -> StreamThreadExceptionResponse.REPLACE_THREAD); startStreamsAndWaitForRunning(kafkaStreams); stateTransitionHistory.clear(); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - injectError.set(true); - waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error"); + injectError.set(true); + waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error"); + + waitForTransitionFromRebalancingToRunning(); + + logCaptureContext.await(); + // after we replace the thread there should be two remaining threads with 5 bytes each for + // the cache and 50 for the input buffer + assertThat(logCaptureContext.getMessages(), + hasItems(containsString("Adding StreamThread-3, there are now 2 threads with cache size/max buffer size values as 5/50 per thread. 
"))); + } + } + + @Test + @SuppressWarnings("deprecation") + public void shouldResizeMaxBufferAfterThreadReplacement() throws InterruptedException { + final long totalCacheBytes = 10L; + final Properties props = new Properties(); + props.putAll(properties); + props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); + props.put(StreamsConfig.INPUT_BUFFER_MAX_BYTES_CONFIG, totalCacheBytes); - waitForTransitionFromRebalancingToRunning(); + final AtomicBoolean injectError = new AtomicBoolean(false); - for (final String log : appender.getMessages()) { - // after we replace the thread there should be two remaining threads with 5 bytes each for - // the cache and 50 for the input buffer - if (log.endsWith("Adding StreamThread-3, there are now 2 threads with cache size/max buffer size values as 5/50 per thread.")) { - return; + final StreamsBuilder builder = new StreamsBuilder(); + final KStream stream = builder.stream(inputTopic); + stream.transform(() -> new Transformer>() { + @Override + public void init(final ProcessorContext context) { + context.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { + if (Thread.currentThread().getName().endsWith("StreamThread-1") && injectError.get()) { + injectError.set(false); + throw new RuntimeException("BOOM"); } - } + }); + } + + @Override + public KeyValue transform(final String key, final String value) { + return new KeyValue<>(key, value); } + + @Override + public void close() { + } + }); + + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldResizeMaxBufferAfterThreadReplacement"); + final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { + logCaptureContext.setLatch(20); + addStreamStateChangeListener(kafkaStreams); + kafkaStreams.setUncaughtExceptionHandler(e -> StreamThreadExceptionResponse.REPLACE_THREAD); + startStreamsAndWaitForRunning(kafkaStreams); + + stateTransitionHistory.clear(); + injectError.set(true); + waitForCondition(() -> !injectError.get(), "StreamThread did not hit and reset the injected error"); + + waitForTransitionFromRebalancingToRunning(); + logCaptureContext.await(); + // all 10 bytes should be available for remaining thread + assertThat(logCaptureContext.getMessages(), + hasItems(containsString("Adding StreamThread-3, there are now 2 threads with cache size/max buffer size values as 5242880/5 per thread. 
"))); } - fail(); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java index 354fbcac3189d..39fdb37a868f5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java @@ -35,10 +35,10 @@ import org.apache.kafka.streams.kstream.TimeWindows; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.Windows; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.SessionStore; import org.apache.kafka.streams.state.ValueAndTimestamp; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockAggregator; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockInitializer; @@ -581,15 +581,15 @@ public void shouldCountAndMaterializeResults() { public void shouldLogAndMeasureSkipsInAggregate() { groupedStream.count(Materialized.>as("count").withKeySerde(Serdes.String())); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamAggregate.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureSkipsInAggregate"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] " - + "offset=[6]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key or value. topic=[topic] partition=[0] offset=[6] ") ); } } @@ -631,15 +631,15 @@ public void shouldLogAndMeasureSkipsInReduce() { .withValueSerde(Serdes.String()) ); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamReduce.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureSkipsInReduce"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] " - + "offset=[6]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key or value. 
topic=[topic] partition=[0] offset=[6] ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index d4f716df0843c..f18198d72adcd 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -33,10 +33,10 @@ import org.apache.kafka.streams.kstream.StreamJoined; import org.apache.kafka.streams.processor.internals.InternalTopicConfig; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.TestInputTopic; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.WindowBytesStoreSupplier; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockValueJoiner; @@ -74,7 +74,7 @@ public class KStreamKStreamJoinTest { private final String errorMessagePrefix = "Window settings mismatch. WindowBytesStoreSupplier settings"; @Test - public void shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVersionLatest() { + public void shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVersionLatest() throws InterruptedException { final StreamsBuilder builder = new StreamsBuilder(); final KStream left = builder.stream("left", Consumed.with(Serdes.String(), Serdes.Integer())); @@ -89,17 +89,20 @@ public void shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVers props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamKStreamJoin.class); - final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVersionLatest")) { + logCaptureContext.setLatch(6); + + final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props); final TestInputTopic inputTopic = driver.createInputTopic("left", new StringSerializer(), new IntegerSerializer()); inputTopic.pipeInput("A", null); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key or value. topic=[left] partition=[0] offset=[0]") - ); + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key or value. 
topic=[left] partition=[0] offset=[0] ")); } } @@ -275,7 +278,10 @@ public void shouldThrowExceptionWhenOtherJoinStoreSetsRetainDuplicatesFalse() { @Test public void shouldBuildJoinWithCustomStoresAndCorrectWindowSettings() { - //Case where everything matches up + // Case where everything matches up + final WindowBytesStoreSupplier thisStoreSupplier = buildWindowBytesStoreSupplier("in-memory-join-store", 150, 100, true); + final WindowBytesStoreSupplier otherStoreSupplier = buildWindowBytesStoreSupplier("in-memory-join-store-other", 150, 100, true); + final StreamsBuilder builder = new StreamsBuilder(); final KStream left = builder.stream("left", Consumed.with(Serdes.String(), Serdes.Integer())); final KStream right = builder.stream("right", Consumed.with(Serdes.String(), Serdes.Integer())); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java index d162acfa374ba..1e12b006dbc8d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java @@ -43,7 +43,7 @@ import org.apache.kafka.streams.kstream.Joined; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockValueJoiner; @@ -228,34 +228,38 @@ public void shouldClearTableEntryOnNullValueUpdates() { @Test public void shouldLogAndMeterWhenSkippingNullLeftKey() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamKTableJoin.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterWhenSkippingNullLeftKey")) { final TestInputTopic inputTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer()); inputTopic.pipeInput(null, "A"); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null join key or value. topic=[streamTopic] partition=[0] " - + "offset=[0]")); + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null join key or value. topic=[streamTopic] partition=[0] " + + "offset=[0] ") + ); } } @Test - public void shouldLogAndMeterWhenSkippingNullLeftValue() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamKTableJoin.class)) { + public void shouldLogAndMeterWhenSkippingNullLeftValue() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterWhenSkippingNullLeftValue")) { + logCaptureContext.setLatch(5); final TestInputTopic inputTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer()); inputTopic.pipeInput(1, null); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null join key or value. topic=[streamTopic] partition=[0] " - + "offset=[0]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null join key or value. 
topic=[streamTopic] partition=[0] " + + "offset=[0] ") ); } } - private final String expectedTopologyWithGeneratedRepartitionTopicNames = "Topologies:\n" + " Sub-topology: 0\n" diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java index 21c6e6af1228c..fd173f8ec73e5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java @@ -37,14 +37,13 @@ import org.apache.kafka.streams.processor.internals.metrics.TaskMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.internals.ThreadCache; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -55,7 +54,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.stream.Collectors; import static java.time.Duration.ofMillis; import static org.apache.kafka.common.utils.Utils.mkEntry; @@ -373,17 +371,14 @@ public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() { new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()) ); - try (final LogCaptureAppender appender = - LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics")) { processor.process(new Record<>(null, "1", 0L)); assertThat( - appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(Event::getMessage) - .collect(Collectors.toList()), - hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2] ") ); } @@ -417,17 +412,17 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace() { context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 11L)); - try (final LogCaptureAppender appender = - LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace")) { // record is late context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("Late1", "1", 0L)); assertThat( - appender.getMessages(), - hasItem("Skipping record for expired window." 
+ - " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[11]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record for expired window." + + " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[11] ") ); } @@ -470,8 +465,8 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace() { ).get(); processor.init(context); - try (final LogCaptureAppender appender = - LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace")) { // dummy record to establish stream time = 0 context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); @@ -498,9 +493,9 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace() { processor.process(new Record<>("Late1", "1", 0L)); assertThat( - appender.getMessages(), - hasItem("Skipping record for expired window." + - " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[12]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record for expired window." + + " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[12] ") ); } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java index b227c7183381d..6269bba4b2816 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java @@ -34,8 +34,6 @@ import org.apache.kafka.streams.kstream.SlidingWindows; import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.ValueAndTimestamp; @@ -46,6 +44,7 @@ import org.apache.kafka.streams.state.internals.InMemoryWindowBytesStoreSupplier; import org.apache.kafka.streams.state.internals.InMemoryWindowStore; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockAggregator; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; @@ -68,7 +67,6 @@ import java.util.Map; import java.util.Properties; import java.util.Random; -import java.util.stream.Collectors; import static java.time.Duration.ofMillis; import static java.util.Arrays.asList; @@ -1003,7 +1001,7 @@ public void testNoGracePeriodLargeInput() { } @Test - public void shouldLogAndMeterWhenSkippingNullKey() { + public void shouldLogAndMeterWhenSkippingNullKey() throws InterruptedException { final String builtInMetricsVersion = StreamsConfig.METRICS_LATEST; final StreamsBuilder builder = new StreamsBuilder(); final String topic = "topic"; @@ -1014,24 +1012,19 @@ public void shouldLogAndMeterWhenSkippingNullKey() { .aggregate(MockInitializer.STRING_INIT, MockAggregator.toStringInstance("+"), 
Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String())); props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, builtInMetricsVersion); - - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSlidingWindowAggregate.class); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKey"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { + logCaptureContext.setLatch(1); final TestInputTopic inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer()); inputTopic.pipeInput(null, "1"); - assertThat( - appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(Event::getMessage) - .collect(Collectors.toList()), - hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] offset=[0]") - ); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("WARN Skipping record due to null key or value. topic=[topic] partition=[0] offset=[0] ")); } } @Test - public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { + public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() throws InterruptedException { final String builtInMetricsVersion = StreamsConfig.METRICS_LATEST; final StreamsBuilder builder = new StreamsBuilder(); final String topic = "topic"; @@ -1053,8 +1046,9 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, builtInMetricsVersion); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSlidingWindowAggregate.class); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingExpiredWindowByGrace"); + final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { + logCaptureContext.setLatch(1); final TestInputTopic inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer()); @@ -1069,21 +1063,22 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { assertLatenessMetrics(driver, is(7.0), is(185.0), is(96.25)); - assertThat(appender.getMessages(), hasItems( + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItems( // left window for k@100 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[90,100] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[90,100] expiration=[110] streamTime=[200] ", // left window for k@101 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[91,101] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[91,101] expiration=[110] streamTime=[200] ", // left window for k@102 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[92,102] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[92,102] expiration=[110] streamTime=[200] ", // left window for k@103 - "Skipping record for expired window. 
topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[93,103] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[93,103] expiration=[110] streamTime=[200] ", // left window for k@104 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[94,104] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[94,104] expiration=[110] streamTime=[200] ", // left window for k@105 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[95,105] expiration=[110] streamTime=[200]", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[95,105] expiration=[110] streamTime=[200] ", // left window for k@15 - "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[15] window=[5,15] expiration=[110] streamTime=[200]" + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[15] window=[5,15] expiration=[110] streamTime=[200] " )); final TestOutputTopic, String> outputTopic = driver.createOutputTopic("output", new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer()); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java index 750f7f508bfea..d2f9f58d56d5f 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java @@ -33,10 +33,10 @@ import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.TimeWindows; import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.TestInputTopic; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockAggregator; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; @@ -278,14 +278,15 @@ public void shouldLogAndMeterWhenSkippingNullKey() { Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String()) ); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKey"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer()); inputTopic.pipeInput(null, "1"); - assertThat(appender.getMessages(), hasItem("Skipping record due to null key. topic=[topic] partition=[0] offset=[0]")); + assertThat(logCaptureContext.getMessages(), hasItem("WARN Skipping record due to null key. 
topic=[topic] partition=[0] offset=[0] ")); } } @@ -310,7 +311,8 @@ public void shouldLogAndMeterWhenSkippingExpiredWindow() { .map((key, value) -> new KeyValue<>(key.toString(), value)) .to("output"); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingExpiredWindow"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -331,14 +333,14 @@ public void shouldLogAndMeterWhenSkippingExpiredWindow() { is(84.875) // (0 + 100 + 99 + 98 + 97 + 96 + 95 + 94) / 8 ); - assertThat(appender.getMessages(), hasItems( - "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[0] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[1] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[2] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[3] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[4] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[5] window=[0,10) expiration=[10] streamTime=[100]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[10] streamTime=[100]" + assertThat(logCaptureContext.getMessages(), hasItems( + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[0] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[1] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[2] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[3] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[4] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[5] window=[0,10) expiration=[10] streamTime=[100] ", + "WARN Skipping record for expired window. 
topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[10] streamTime=[100] " )); final TestOutputTopic outputTopic = @@ -369,7 +371,8 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { .map((key, value) -> new KeyValue<>(key.toString(), value)) .to("output"); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingExpiredWindowByGrace"); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -385,14 +388,14 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { assertLatenessMetrics(driver, is(7.0), is(194.0), is(97.375)); - assertThat(appender.getMessages(), hasItems( - "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[100,110) expiration=[110] streamTime=[200]", - "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[110] streamTime=[200]" + assertThat(logCaptureContext.getMessages(), hasItems( + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[100,110) expiration=[110] streamTime=[200] ", + "WARN Skipping record for expired window. 
topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[110] streamTime=[200] " )); final TestOutputTopic outputTopic = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java index 1039cb70ddfde..3154fa665bd34 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java @@ -30,9 +30,9 @@ import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockValueJoiner; @@ -261,12 +261,13 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableInnerJoin.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. topic=[left] partition=[-1] offset=[-2] ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java index 4af2e3934e131..61c9fbcc4bb75 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java @@ -34,9 +34,9 @@ import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockReducer; @@ -527,12 +527,13 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableLeftJoin.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key. 
topic=[left] partition=[-1] offset=[-2]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. topic=[left] partition=[-1] offset=[-2] ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java index b14973cc93476..d71bf8e65acee 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java @@ -29,8 +29,8 @@ import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockValueJoiner; @@ -418,12 +418,13 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableOuterJoin.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( - appender.getMessages(), - hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. 
topic=[left] partition=[-1] offset=[-2] ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java index eaf74c8660ad1..5efba9ea1f993 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java @@ -23,13 +23,11 @@ import org.apache.kafka.streams.processor.api.MockProcessorContext; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.Record; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.StreamsTestUtils; import org.junit.Test; import java.util.Properties; -import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.MatcherAssert.assertThat; @@ -54,15 +52,14 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVer context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableRightJoin.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest")) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( - appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(Event::getMessage) - .collect(Collectors.toList()), - hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. 
topic=[left] partition=[-1] offset=[-2] ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java index 70e1bccdbe65e..d48c931e821d7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java @@ -33,10 +33,9 @@ import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.test.TestRecord; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.StreamsTestUtils; @@ -45,8 +44,8 @@ import java.time.Duration; import java.time.Instant; +import java.util.Collections; import java.util.Properties; -import java.util.stream.Collectors; import static java.util.Arrays.asList; import static org.apache.kafka.test.StreamsTestUtils.getMetricByName; @@ -136,7 +135,9 @@ public void kTableShouldLogAndMeterOnSkippedRecords() { final String topic = "topic"; builder.table(topic, stringConsumed); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableSource.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#kTableShouldLogAndMeterOnSkippedRecords", + Collections.singletonMap(KTableSource.class.getName(), "WARN")); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -150,11 +151,8 @@ public void kTableShouldLogAndMeterOnSkippedRecords() { inputTopic.pipeInput(null, "value"); assertThat( - appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(Event::getMessage) - .collect(Collectors.toList()), - hasItem("Skipping record due to null key. topic=[topic] partition=[0] offset=[0]") + logCaptureContext.getMessages(), + hasItem("WARN Skipping record due to null key. topic=[topic] partition=[0] offset=[0] ") ); } } @@ -165,7 +163,9 @@ public void kTableShouldLogOnOutOfOrder() { final String topic = "topic"; builder.table(topic, stringConsumed, Materialized.as("store")); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableSource.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#kTableShouldLogOnOutOfOrder", + Collections.singletonMap(KTableSource.class.getName(), "WARN")); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -179,13 +179,8 @@ public void kTableShouldLogOnOutOfOrder() { inputTopic.pipeInput("key", "value", 10L); inputTopic.pipeInput("key", "value", 5L); - assertThat( - appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(Event::getMessage) - .collect(Collectors.toList()), - hasItem("Detected out-of-order KTable update for store, old timestamp=[10] new timestamp=[5]. 
topic=[topic] partition=[0] offset=[1].") - ); + assertThat(logCaptureContext.getMessages(), + hasItem("WARN Detected out-of-order KTable update for store, old timestamp=[10] new timestamp=[5]. topic=[topic] partition=[0] offset=[1]. ")); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java index 0670fedb0aa63..da7059ec9982d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java @@ -35,8 +35,8 @@ import org.apache.kafka.streams.state.TimestampedBytesStore; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; import org.apache.kafka.streams.state.internals.WrappedStateStore; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockStateRestoreListener; import org.apache.kafka.test.NoOpReadOnlyStore; import org.apache.kafka.test.TestUtils; @@ -159,7 +159,7 @@ public void shouldReadCheckpointOffsets() throws IOException { } @Test - public void shouldLogWarningMessageWhenIOExceptionInCheckPoint() throws IOException { + public void shouldLogWarningMessageWhenIOExceptionInCheckPoint() throws IOException, InterruptedException { final Map offsets = Collections.singletonMap(t1, 25L); stateManager.initialize(); stateManager.updateChangelogOffsets(offsets); @@ -170,9 +170,12 @@ public void shouldLogWarningMessageWhenIOExceptionInCheckPoint() throws IOExcept file.createNewFile(); file.setWritable(false); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(GlobalStateManagerImpl.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogWarningMessageWhenIOExceptionInCheckPoint")) { + logCaptureContext.setLatch(2); stateManager.checkpoint(); - assertThat(appender.getMessages(), hasItem(containsString( + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem(containsString( "Failed to write offset checkpoint file to " + checkpointFile.getPath() + " for global stores"))); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java index 74cd6cfaa230c..6bfce8316f831 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java @@ -52,7 +52,7 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.StreamsException; import org.apache.kafka.streams.processor.internals.InternalTopicManager.ValidationResult; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.easymock.EasyMock; import org.junit.After; import org.junit.Before; @@ -73,6 +73,7 @@ import static org.apache.kafka.common.utils.Utils.mkSet; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasItem; @@ -85,6 +86,7 @@ import static org.junit.Assert.fail; public class InternalTopicManagerTest { + private final Node broker1 = new Node(0, "dummyHost-1", 1234); private final Node broker2 = new Node(1, "dummyHost-2", 1234); private final List cluster = new ArrayList(2) { @@ -777,32 +779,35 @@ public void shouldExhaustRetriesOnTimeoutExceptionForMakeReady() { } @Test - public void shouldLogWhenTopicNotFoundAndNotThrowException() { - mockAdminClient.addTopic( - false, - topic1, - Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList())), - null); + public void shouldLogWhenTopicNotFoundAndNotThrowException() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldLogWhenTopicNotFoundAndNotThrowException", + Collections.singletonMap(InternalTopicManager.class.getName(), "DEBUG"))) { + logCaptureContext.setLatch(4); + + mockAdminClient.addTopic( + false, + topic1, + Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList())), + null); - final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap()); - internalTopicConfig.setNumberOfPartitions(1); + final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap()); + internalTopicConfig.setNumberOfPartitions(1); - final InternalTopicConfig internalTopicConfigII = - new RepartitionTopicConfig("internal-topic", Collections.emptyMap()); - internalTopicConfigII.setNumberOfPartitions(1); + final InternalTopicConfig internalTopicConfigII = + new RepartitionTopicConfig("internal-topic", Collections.emptyMap()); + internalTopicConfigII.setNumberOfPartitions(1); - final Map topicConfigMap = new HashMap<>(); - topicConfigMap.put(topic1, internalTopicConfig); - topicConfigMap.put("internal-topic", internalTopicConfigII); + final Map topicConfigMap = new HashMap<>(); + topicConfigMap.put(topic1, internalTopicConfig); + topicConfigMap.put("internal-topic", internalTopicConfigII); - LogCaptureAppender.setClassLoggerToDebug(InternalTopicManager.class); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(InternalTopicManager.class)) { internalTopicManager.makeReady(topicConfigMap); + logCaptureContext.await(); assertThat( - appender.getMessages(), - hasItem("stream-thread [" + threadName + "] Topic internal-topic is unknown or not found, hence not existed yet.\n" + - "Error message was: org.apache.kafka.common.errors.UnknownTopicOrPartitionException: Topic internal-topic not found.") + logCaptureContext.getMessages(), + hasItem(containsString("Topic internal-topic is unknown or not found, hence not existed yet")) ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java index 389d0d58c8d7f..93d39da9c43ff 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java @@ -35,19 +35,18 @@ import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler; import org.apache.kafka.streams.processor.TimestampExtractor; -import 
org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockSourceNode; import org.apache.kafka.test.MockTimestampExtractor; -import org.hamcrest.Matchers; import org.junit.Test; import java.util.Arrays; import java.util.HashMap; +import java.util.Collections; import java.util.List; import java.util.OptionalLong; import java.util.UUID; -import java.util.Collections; import java.util.Optional; import static org.apache.kafka.common.utils.Utils.mkEntry; @@ -571,20 +570,16 @@ public void shouldNeverWaitIfIdlingIsDisabled() { group.addRawRecords(partition1, list1); assertThat(group.allPartitionsBufferedLocally(), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldNeverWaitIfIdlingIsDisabled", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(true)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo( - "[test] Ready for processing because max.task.idle.ms is disabled.\n" + - "\tThere may be out-of-order processing for this task as a result.\n" + - "\tBuffered partitions: [topic-1]\n" + - "\tNon-buffered partitions: [topic-2]" - )) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Ready for processing because max.task.idle.ms is disabled.\n" + + "\tThere may be out-of-order processing for this task as a result.\n" + + "\tBuffered partitions: [topic-1]\n" + + "\tNon-buffered partitions: [topic-2] ") ); } } @@ -615,15 +610,13 @@ public void shouldBeReadyIfAllPartitionsAreBuffered() { group.addRawRecords(partition2, list2); assertThat(group.allPartitionsBufferedLocally(), is(true)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldBeReadyIfAllPartitionsAreBuffered", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(true)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo("[test] All partitions were buffered locally, so this task is ready for processing.")) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] All partitions were buffered locally, so this task is ready for processing. 
") ); } } @@ -650,15 +643,13 @@ public void shouldWaitForFetchesWhenMetadataIsIncomplete() { group.addRawRecords(partition1, list1); assertThat(group.allPartitionsBufferedLocally(), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldWaitForFetchesWhenMetadataIsIncomplete", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo("[test] Waiting to fetch data for topic-2")) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Waiting to fetch data for topic-2 ") ); } lags.put(partition2, OptionalLong.of(0L)); @@ -690,15 +681,13 @@ public void shouldWaitForPollWhenLagIsNonzero() { assertThat(group.allPartitionsBufferedLocally(), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldWaitForPollWhenLagIsNonzero", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo("[test] Lag for topic-2 is currently 1, but no data is buffered locally. Waiting to buffer some records.")) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Lag for topic-2 is currently 1, but no data is buffered locally. Waiting to buffer some records. ") ); } } @@ -725,53 +714,43 @@ public void shouldIdleAsSpecifiedWhenLagIsZero() { assertThat(group.allPartitionsBufferedLocally(), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.1", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo("[test] Lag for topic-2 is currently 0 and current time is 0. Waiting for new data to be produced for configured idle time 1 (deadline is 1).")) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Lag for topic-2 is currently 0 and current time is 0. Waiting for new data to be produced for configured idle time 1 (deadline is 1). 
") ); } - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.2", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(1L), is(true)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo( - "[test] Continuing to process although some partitions are empty on the broker.\n" + - "\tThere may be out-of-order processing for this task as a result.\n" + - "\tPartitions with local data: [topic-1].\n" + - "\tPartitions we gave up waiting for, with their corresponding deadlines: {topic-2=1}.\n" + - "\tConfigured max.task.idle.ms: 1.\n" + - "\tCurrent wall-clock time: 1." - )) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Continuing to process although some partitions are empty on the broker.\n" + + "\tThere may be out-of-order processing for this task as a result.\n" + + "\tPartitions with local data: [topic-1].\n" + + "\tPartitions we gave up waiting for, with their corresponding deadlines: {topic-2=1}.\n" + + "\tConfigured max.task.idle.ms: 1.\n" + + "\tCurrent wall-clock time: 1. ") ); } - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(PartitionGroup.class)) { - LogCaptureAppender.setClassLoggerToTrace(PartitionGroup.class); + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.3", + Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(2L), is(true)); assertThat( - appender.getEvents(), - hasItem(Matchers.allOf( - Matchers.hasProperty("level", equalTo("TRACE")), - Matchers.hasProperty("message", equalTo( - "[test] Continuing to process although some partitions are empty on the broker.\n" + - "\tThere may be out-of-order processing for this task as a result.\n" + - "\tPartitions with local data: [topic-1].\n" + - "\tPartitions we gave up waiting for, with their corresponding deadlines: {topic-2=1}.\n" + - "\tConfigured max.task.idle.ms: 1.\n" + - "\tCurrent wall-clock time: 2." - )) - )) + logCaptureContext.getMessages(), + hasItem("TRACE [test] Continuing to process although some partitions are empty on the broker.\n" + + "\tThere may be out-of-order processing for this task as a result.\n" + + "\tPartitions with local data: [topic-1].\n" + + "\tPartitions we gave up waiting for, with their corresponding deadlines: {topic-2=1}.\n" + + "\tConfigured max.task.idle.ms: 1.\n" + + "\tCurrent wall-clock time: 2. 
") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java index 5947842a74c13..f8bc4a605465c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java @@ -31,12 +31,12 @@ import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.TaskId; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.TimestampedBytesStore; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; import org.apache.kafka.streams.state.internals.StoreQueryUtils; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockKeyValueStore; import org.apache.kafka.test.MockRestoreCallback; import org.apache.kafka.test.TestUtils; @@ -75,9 +75,12 @@ import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.reset; import static org.easymock.EasyMock.verify; +import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; @@ -780,30 +783,28 @@ public void shouldThrowIfRestoringUnregisteredStore() { @SuppressWarnings("OptionalGetWithoutIsPresent") @Test - public void shouldLogAWarningIfCheckpointThrowsAnIOException() { + public void shouldLogAWarningIfCheckpointThrowsAnIOException() throws InterruptedException { final ProcessorStateManager stateMgr = getStateManager(Task.TaskType.ACTIVE); stateMgr.registerStore(persistentStore, persistentStore.stateRestoreCallback, null); stateDirectory.clean(); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(ProcessorStateManager.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldLogAWarningIfCheckpointThrowsAnIOException")) { + logCaptureContext.setLatch(4); + stateMgr.updateChangelogOffsets(singletonMap(persistentStorePartition, 10L)); stateMgr.checkpoint(); - boolean foundExpectedLogMessage = false; - for (final LogCaptureAppender.Event event : appender.getEvents()) { - if ("WARN".equals(event.getLevel()) - && event.getMessage().startsWith("process-state-manager-test Failed to write offset checkpoint file to [") - && event.getMessage().endsWith(".checkpoint]." + - " This may occur if OS cleaned the state.dir in case when it located in ${java.io.tmpdir} directory." + - " This may also occur due to running multiple instances on the same machine using the same state dir." 
+ - " Changing the location of state.dir may resolve the problem.") - && event.getThrowableInfo().get().startsWith("java.io.FileNotFoundException: ")) { - - foundExpectedLogMessage = true; - break; - } - } - assertTrue(foundExpectedLogMessage); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), + hasItem( + allOf( + startsWith("WARN process-state-manager-test Failed to write offset checkpoint file to ["), + containsString(".checkpoint]." + + " This may occur if OS cleaned the state.dir in case when it located in ${java.io.tmpdir} directory." + + " This may also occur due to running multiple instances on the same machine using the same state dir." + + " Changing the location of state.dir may resolve the problem. "), + containsString("java.io.FileNotFoundException: ")))); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java index 48364f27db583..9dd8ccfad83d3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java @@ -53,7 +53,7 @@ import org.apache.kafka.streams.processor.StreamPartitioner; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockClientSupplier; import java.util.UUID; @@ -729,47 +729,42 @@ public void shouldThrowStreamsExceptionOnSubsequentCloseIfFatalEvenWithContinueE } @Test - public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() { - final RecordCollector collector = new RecordCollectorImpl( - logContext, - taskId, - getExceptionalStreamsProducerOnSend(new Exception()), - new AlwaysContinueProductionExceptionHandler(), - streamsMetrics - ); + public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler")) { + logCaptureContext.setLatch(1); - try (final LogCaptureAppender logCaptureAppender = - LogCaptureAppender.createAndRegister(RecordCollectorImpl.class)) { + final RecordCollector collector = new RecordCollectorImpl( + logContext, + taskId, + getExceptionalStreamsProducerOnSend(new Exception()), + new AlwaysContinueProductionExceptionHandler(), + streamsMetrics + ); collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner); collector.flush(); - final List messages = logCaptureAppender.getMessages(); - final StringBuilder errorMessage = new StringBuilder("Messages received:"); - for (final String error : messages) { - errorMessage.append("\n - ").append(error); - } - assertTrue( - errorMessage.toString(), - messages.get(messages.size() - 1) - .endsWith("Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded.") - ); - } - - final Metric metric = streamsMetrics.metrics().get(new MetricName( - "dropped-records-total", - "stream-task-metrics", - "The total number of dropped records", - mkMap( - mkEntry("thread-id", 
Thread.currentThread().getName()), - mkEntry("task-id", taskId.toString()) - ) - )); - assertEquals(1.0, metric.metricValue()); + logCaptureContext.await(); + final List messages = logCaptureContext.getMessages(); + assertTrue(messages.get(0) + .contains("Exception handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded. ")); + + final Metric metric = streamsMetrics.metrics().get(new MetricName( + "dropped-records-total", + "stream-task-metrics", + "The total number of dropped records", + mkMap( + mkEntry("thread-id", Thread.currentThread().getName()), + mkEntry("task-id", taskId.toString()) + ) + )); + assertEquals(1.0, metric.metricValue()); - collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner); - collector.flush(); - collector.closeClean(); + collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner); + collector.flush(); + collector.closeClean(); + } } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java index 81bc7d7562d28..6165b70c59869 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java @@ -32,8 +32,8 @@ import org.apache.kafka.streams.errors.ProcessorStateException; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.StateDirectory.TaskDirectory; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.TestUtils; import org.junit.After; @@ -74,6 +74,7 @@ import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -334,20 +335,24 @@ public void shouldCleanupStateDirectoriesWhenLastModifiedIsLessThanNowMinusClean } @Test - public void shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself() { + public void shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself() throws InterruptedException { final File dir = directory.getOrCreateDirectoryForTask(new TaskId(2, 0)); assertTrue(new File(dir, "store").mkdir()); assertEquals(1, directory.listAllTaskDirectories().size()); assertEquals(1, directory.listNonEmptyTaskDirectories().size()); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldCleanupObsoleteStateDirectoriesOnlyOnce.0")) { time.sleep(5000); + logCaptureContext.setLatch(1); directory.cleanRemovedTasks(0); + logCaptureContext.await(); assertFalse(dir.exists()); assertEquals(0, directory.listAllTaskDirectories().size()); assertEquals(0, directory.listNonEmptyTaskDirectories().size()); + logCaptureContext.await(); assertThat( - appender.getMessages(), + logCaptureContext.getMessages(), hasItem(containsString("Deleting obsolete state directory")) ); } @@ -493,11 +498,12 @@ public void 
shouldCleanupAllTaskDirectoriesIncludingGlobalOne() { @Test public void shouldNotCreateBaseDirectory() throws IOException { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldNotCreateBaseDirectory")) { initializeStateDirectory(false, false); assertThat(stateDir.exists(), is(false)); assertThat(appDir.exists(), is(false)); - assertThat(appender.getMessages(), + assertThat(logCaptureContext.getMessages(), not(hasItem(containsString("Error changing permissions for the state or base directory")))); } } @@ -573,12 +579,14 @@ public void shouldNotDeleteAppDirWhenCleanUpIfNotEmpty() throws IOException { final File dummyFile = new File(appDir, "dummy"); assertTrue(dummyFile.createNewFile()); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldNotDeleteAppDirWhenCleanUpIfNotEmpty")) { + logCaptureContext.setLatch(4); // call StateDirectory#clean directory.clean(); assertThat( - appender.getMessages(), - hasItem(endsWith(String.format("Failed to delete state store directory of %s for it is not empty", appDir.getAbsolutePath()))) + logCaptureContext.getMessages(), + hasItem(containsString(String.format("Failed to delete state store directory of %s for it is not empty", appDir.getAbsolutePath()))) ); } } @@ -591,11 +599,14 @@ public void shouldLogManualUserCallMessage() { assertThat(testFile.mkdir(), is(true)); assertThat(directory.directoryForTaskIsEmpty(taskId), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogManualUserCallMessage")) { + logCaptureContext.setLatch(2); + directory.clean(); assertThat( - appender.getMessages(), - hasItem(endsWith("as user calling cleanup.")) + logCaptureContext.getMessages(), + hasItem(endsWith("as user calling cleanup. ")) ); } } @@ -608,17 +619,22 @@ public void shouldLogStateDirCleanerMessage() { assertThat(testFile.mkdir(), is(true)); assertThat(directory.directoryForTaskIsEmpty(taskId), is(false)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogStateDirCleanerMessage")) { + logCaptureContext.setLatch(2); final long cleanupDelayMs = 0; time.sleep(5000); directory.cleanRemovedTasks(cleanupDelayMs); - assertThat(appender.getMessages(), hasItem(endsWith("ms has elapsed (cleanup delay is " + cleanupDelayMs + "ms)."))); + assertThat(logCaptureContext.getMessages(), hasItem(endsWith("ms has elapsed (cleanup delay is " + cleanupDelayMs + "ms). 
"))); } } @Test public void shouldLogTempDirMessage() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogTempDirMessage")) { + logCaptureContext.setLatch(4); + new StateDirectory( new StreamsConfig( mkMap( @@ -631,10 +647,10 @@ public void shouldLogTempDirMessage() { false ); assertThat( - appender.getMessages(), - hasItem("Using an OS temp directory in the state.dir property can cause failures with writing the" + - " checkpoint file due to the fact that this directory can be cleared by the OS." + - " Resolved state.dir: [" + System.getProperty("java.io.tmpdir") + "/kafka-streams]") + logCaptureContext.getMessages(), + hasItem(startsWith("WARN Using an OS temp directory in the state.dir property can cause failures with writing the" + + " checkpoint file due to the fact that this directory can be cleared by the OS." + + " Resolved state.dir: [" + System.getProperty("java.io.tmpdir") + "/kafka-streams]")) ); } } @@ -753,7 +769,10 @@ public void shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteThePar assertThat(directory.listAllTaskDirectories().size(), is(1)); assertThat(directory.listNonEmptyTaskDirectories().size(), is(1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StateDirectory.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + + "#shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteTheParentDirectories")) { + logCaptureContext.setLatch(2); time.sleep(5000); directory.cleanRemovedTasks(0); assertThat(taskDir.exists(), is(false)); @@ -761,7 +780,7 @@ public void shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteThePar assertThat(directory.listAllTaskDirectories().size(), is(0)); assertThat(directory.listNonEmptyTaskDirectories().size(), is(0)); assertThat( - appender.getMessages(), + logCaptureContext.getMessages(), hasItem(containsString("Deleting obsolete state directory")) ); } @@ -832,7 +851,6 @@ public FutureStateDirectoryProcessFile() { FutureStateDirectoryProcessFile(final UUID processId, final String newField) { this.processId = processId; this.newField = newField; - } } @@ -860,4 +878,4 @@ public void run() { } } } -} \ No newline at end of file +} diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java index bc7fb14ba7da7..52bb790c61fd5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateManagerUtilTest.java @@ -32,6 +32,7 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; import org.slf4j.Logger; @@ -51,6 +52,7 @@ import static org.powermock.api.easymock.PowerMock.replayAll; @RunWith(PowerMockRunner.class) +@PowerMockIgnore("javax.management.*") @PrepareForTest(Utils.class) public class StateManagerUtilTest { @@ -193,7 +195,6 @@ public void testCloseStateManagerThrowsExceptionWhenClean() { @Test public void testCloseStateManagerThrowsExceptionWhenDirty() { 
expect(stateManager.taskId()).andReturn(taskId); - expect(stateDirectory.lock(taskId)).andReturn(true); stateManager.close(); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java index 594fc7e842ffc..7d14e6c1fe16a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java @@ -35,7 +35,7 @@ import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockStateRestoreListener; import org.apache.kafka.test.StreamsTestUtils; import org.easymock.EasyMock; @@ -1127,15 +1127,19 @@ public void shouldThrowIfRestoreCallbackThrows() { } @Test - public void shouldNotThrowOnUnknownRevokedPartition() { - LogCaptureAppender.setClassLoggerToDebug(StoreChangelogReader.class); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(StoreChangelogReader.class)) { + public void shouldNotThrowOnUnknownRevokedPartition() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowOnUnknownRevokedPartition", + Collections.singletonMap(StoreChangelogReader.class.getName(), "DEBUG"))) { + logCaptureContext.setLatch(2); + changelogReader.unregister(Collections.singletonList(new TopicPartition("unknown", 0))); - assertThat( - appender.getMessages(), - hasItem("test-reader Changelog partition unknown-0 could not be found," + - " it could be already cleaned up during the handling of task corruption and never restore again") + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem( + "DEBUG test-reader Changelog partition unknown-0 could not be found, " + + "it could be already cleaned up during the handling " + + "of task corruption and never restore again ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java index 51151b4f4080a..dd454856dd678 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java @@ -71,11 +71,11 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.assignment.ReferenceContainer; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockClientSupplier; import org.apache.kafka.test.MockKeyValueStoreBuilder; @@ -129,9 +129,11 @@ import static org.easymock.EasyMock.niceMock; import 
static org.easymock.EasyMock.verify; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isA; @@ -2115,14 +2117,15 @@ public void shouldLogAndRecordSkippedMetricForDeserializationException() { new RecordHeaders(), Optional.empty())); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordDeserializer.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndRecordSkippedMetricForDeserializationException")) { thread.runOnce(); - final List strings = appender.getMessages(); - assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + - " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[0]")); - assertTrue(strings.contains("stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + - " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[1]")); + final List strings = logCaptureContext.getMessages(); + assertThat(strings, hasItem(containsString("WARN stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[0] "))); + assertThat(strings, hasItem(containsString("WARN stream-thread [" + Thread.currentThread().getName() + "] task [0_1]" + + " Skipping record due to deserialization error. topic=[topic1] partition=[1] offset=[1] "))); } } @@ -2630,7 +2633,8 @@ public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() { Collections.singletonMap("client-id", thread.getName()) ); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RecordQueue.class)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndRecordSkippedRecordsForInvalidTimestamps")) { long offset = -1; addRecord(mockConsumer, ++offset); addRecord(mockConsumer, ++offset); @@ -2646,38 +2650,38 @@ public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() { addRecord(mockConsumer, ++offset, 1L); thread.runOnce(); - final List strings = appender.getMessages(); + final List strings = logCaptureContext.getMessages(); - final String threadTaskPrefix = "stream-thread [" + Thread.currentThread().getName() + "] task [0_1] "; + final String threadTaskPrefix = "WARN stream-thread [" + Thread.currentThread().getName() + "] task [0_1] "; assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. " + "topic=[topic1] partition=[1] offset=[0] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. 
" + "topic=[topic1] partition=[1] offset=[1] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. " + "topic=[topic1] partition=[1] offset=[2] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. " + "topic=[topic1] partition=[1] offset=[3] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. " + "topic=[topic1] partition=[1] offset=[4] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); assertTrue(strings.contains( threadTaskPrefix + "Skipping record due to negative extracted timestamp. " + "topic=[topic1] partition=[1] offset=[5] extractedTimestamp=[-1] " + - "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp]" + "extractor=[org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp] " )); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java index 7f442d3131b9f..1cb26f79fac12 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java @@ -49,10 +49,8 @@ import org.apache.kafka.streams.processor.internals.Task.State; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; import org.apache.kafka.streams.processor.internals.testutil.DummyStreamsConfig; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.internals.OffsetCheckpoint; - -import java.util.ArrayList; +import org.apache.kafka.test.LogCaptureContext; import org.easymock.EasyMock; import org.easymock.EasyMockRunner; import org.easymock.Mock; @@ -65,6 +63,7 @@ import org.junit.runner.RunWith; import java.io.File; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -383,7 +382,7 @@ public void shouldComputeOffsetSumFromCheckpointFileForUninitializedTask() throw replay(activeTaskCreator); taskManager.handleAssignment(taskId00Assignment, emptyMap()); - assertThat(uninitializedTask.state(), is(State.CREATED)); + assertThat(uninitializedTask.state(), is(Task.State.CREATED)); assertThat(taskManager.getTaskOffsetSums(), is(expectedOffsetSums)); } @@ -410,7 +409,7 @@ public void shouldComputeOffsetSumFromCheckpointFileForClosedTask() throws Excep closedTask.suspend(); closedTask.closeClean(); - assertThat(closedTask.state(), is(State.CLOSED)); + assertThat(closedTask.state(), is(Task.State.CLOSED)); assertThat(taskManager.getTaskOffsetSums(), is(expectedOffsetSums)); } @@ -2762,20 +2761,23 @@ public Collection changelogPartitions() { } 
@Test - public void shouldHaveRemainingPartitionsUncleared() { - final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true); - final Map offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null)); - task00.setCommittableOffsetsAndMetadata(offsets); + public void shouldHaveRemainingPartitionsUncleared() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldHaveRemainingPartitionsUncleared", + Collections.singletonMap(TaskManager.class.getName(), "DEBUG"))) { + logCaptureContext.setLatch(4); - expectRestoreToBeCompleted(consumer, changeLogReader); - expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andReturn(singletonList(task00)); - consumer.commitSync(offsets); - expectLastCall(); + final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true); + final Map offsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null)); + task00.setCommittableOffsetsAndMetadata(offsets); - replay(activeTaskCreator, consumer, changeLogReader); + expectRestoreToBeCompleted(consumer, changeLogReader); + expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andReturn(singletonList(task00)); + consumer.commitSync(offsets); + expectLastCall(); + + replay(activeTaskCreator, consumer, changeLogReader); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TaskManager.class)) { - LogCaptureAppender.setClassLoggerToDebug(TaskManager.class); taskManager.handleAssignment(taskId00Assignment, emptyMap()); assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true)); assertThat(task00.state(), is(Task.State.RUNNING)); @@ -2783,13 +2785,13 @@ public void shouldHaveRemainingPartitionsUncleared() { taskManager.handleRevocation(mkSet(t1p0, new TopicPartition("unknown", 0))); assertThat(task00.state(), is(Task.State.SUSPENDED)); - final List messages = appender.getMessages(); + logCaptureContext.await(); assertThat( - messages, - hasItem("taskManagerTestThe following revoked partitions [unknown-0] are missing " + + logCaptureContext.getMessages(), + hasItem("DEBUG taskManagerTestThe following revoked partitions [unknown-0] are missing " + "from the current task partitions. It could potentially be due to race " + "condition of consumer detecting the heartbeat failure, or the " + - "tasks have been cleaned up by the handleAssignment callback.") + "tasks have been cleaned up by the handleAssignment callback. ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/testutil/LogCaptureAppender.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/testutil/LogCaptureAppender.java deleted file mode 100644 index 41d15da8c8a60..0000000000000 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/testutil/LogCaptureAppender.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.kafka.streams.processor.internals.testutil; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; - -import java.util.LinkedList; -import java.util.List; -import java.util.Optional; - -public class LogCaptureAppender extends AppenderSkeleton implements AutoCloseable { - private final List events = new LinkedList<>(); - - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - public static class Event { - private final String level; - private final String message; - private final Optional throwableInfo; - - Event(final String level, final String message, final Optional throwableInfo) { - this.level = level; - this.message = message; - this.throwableInfo = throwableInfo; - } - - public String getLevel() { - return level; - } - - public String getMessage() { - return message; - } - - public Optional getThrowableInfo() { - return throwableInfo; - } - } - - public static LogCaptureAppender createAndRegister() { - final LogCaptureAppender logCaptureAppender = new LogCaptureAppender(); - Logger.getRootLogger().addAppender(logCaptureAppender); - return logCaptureAppender; - } - - public static LogCaptureAppender createAndRegister(final Class clazz) { - final LogCaptureAppender logCaptureAppender = new LogCaptureAppender(); - Logger.getLogger(clazz).addAppender(logCaptureAppender); - return logCaptureAppender; - } - - public static void setClassLoggerToDebug(final Class clazz) { - Logger.getLogger(clazz).setLevel(Level.DEBUG); - } - - public static void setClassLoggerToTrace(final Class clazz) { - Logger.getLogger(clazz).setLevel(Level.TRACE); - } - - public static void unregister(final LogCaptureAppender logCaptureAppender) { - Logger.getRootLogger().removeAppender(logCaptureAppender); - } - - @Override - protected void append(final LoggingEvent event) { - synchronized (events) { - events.add(event); - } - } - - public List getMessages() { - final LinkedList result = new LinkedList<>(); - synchronized (events) { - for (final LoggingEvent event : events) { - result.add(event.getRenderedMessage()); - } - } - return result; - } - - public List getEvents() { - final LinkedList result = new LinkedList<>(); - synchronized (events) { - for (final LoggingEvent event : events) { - final String[] throwableStrRep = event.getThrowableStrRep(); - final Optional throwableString; - if (throwableStrRep == null) { - throwableString = Optional.empty(); - } else { - final StringBuilder throwableStringBuilder = new StringBuilder(); - - for (final String s : throwableStrRep) { - throwableStringBuilder.append(s); - } - - throwableString = Optional.of(throwableStringBuilder.toString()); - } - - result.add(new Event(event.getLevel().toString(), event.getRenderedMessage(), throwableString)); - } - } - return result; - } - - @Override - public void close() { - unregister(this); - } - - @Override - public boolean requiresLayout() { - return false; - } -} diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java index e8d578d017b0b..b7a801cab3a83 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java @@ -44,7 +44,6 @@ import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.StateSerdes; @@ -52,6 +51,7 @@ import org.apache.kafka.streams.state.internals.PrefixedWindowKeySchemas.TimeFirstWindowKeySchema; import org.apache.kafka.streams.state.internals.SegmentedBytesStore.KeySchema; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -1012,7 +1012,7 @@ private List> getChangelogRecordsWithoutHeaders() @Test - public void shouldLogAndMeasureExpiredRecords() { + public void shouldLogAndMeasureExpiredRecords() throws InterruptedException { final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final AbstractDualSchemaRocksDBSegmentedBytesStore bytesStore = getBytesStore(); final InternalMockProcessorContext context = new InternalMockProcessorContext( @@ -1023,7 +1023,10 @@ public void shouldLogAndMeasureExpiredRecords() { context.setSystemTimeMs(time.milliseconds()); bytesStore.init((StateStoreContext) context, bytesStore); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeasureExpiredRecords")) { + logCaptureContext.setLatch(1); + // write a record to advance stream time, with a high enough timestamp // that the subsequent record in windows[0] will already be expired. bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0)); @@ -1032,8 +1035,9 @@ public void shouldLogAndMeasureExpiredRecords() { final byte[] value = serializeValue(5); bytesStore.put(key, value); - final List messages = appender.getMessages(); - assertThat(messages, hasItem("Skipping record for expired segment.")); + logCaptureContext.await(); + final List messages = logCaptureContext.getMessages(); + assertThat(messages, hasItem("WARN Skipping record for expired segment. 
")); } final Map metrics = context.metrics().metrics(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java index 19b057a8c98c1..79c6c4c1dcb42 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java @@ -23,10 +23,10 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.processor.StateStoreContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.KeyValueStoreTestDriver; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.InternalMockProcessorContext; import org.junit.After; import org.junit.Before; @@ -578,73 +578,81 @@ public void shouldNotThrowConcurrentModificationException() { } @Test - public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(2); + try (final KeyValueIterator iterator = store.range(-1, 1)) { assertFalse(iterator.hasNext()); } - final List messages = appender.getMessages(); - assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } @Test - public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(2); + try (final KeyValueIterator iterator = store.reverseRange(-1, 1)) { assertFalse(iterator.hasNext()); } - final List messages = appender.getMessages(); - assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." 
+ - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } @Test - public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo")) { + logCaptureContext.setLatch(2); + try (final KeyValueIterator iterator = store.range(2, 1)) { assertFalse(iterator.hasNext()); } - final List messages = appender.getMessages(); - assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } @Test - public void shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + public void shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo")) { + logCaptureContext.setLatch(2); + try (final KeyValueIterator iterator = store.reverseRange(2, 1)) { assertFalse(iterator.hasNext()); } - final List messages = appender.getMessages(); - assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." 
+ - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java index 32e33860ec9cb..8c68f66c0c94d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java @@ -44,11 +44,11 @@ import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; import org.apache.kafka.streams.processor.internals.Task.TaskType; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.StateSerdes; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -803,7 +803,8 @@ public void shouldLogAndMeasureExpiredRecords() { context.setSystemTimeMs(time.milliseconds()); bytesStore.init((StateStoreContext) context, bytesStore); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureExpiredRecords")) { // write a record to advance stream time, with a high enough timestamp // that the subsequent record in windows[0] will already be expired. bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0)); @@ -812,8 +813,8 @@ public void shouldLogAndMeasureExpiredRecords() { final byte[] value = serializeValue(5); bytesStore.put(key, value); - final List messages = appender.getMessages(); - assertThat(messages, hasItem("Skipping record for expired segment.")); + final List messages = logCaptureContext.getMessages(); + assertThat(messages, hasItem("WARN Skipping record for expired segment. 
")); } final Map metrics = context.metrics().metrics(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java index 6e93f6a7ba1ad..da7097765f8e3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java @@ -32,9 +32,9 @@ import org.apache.kafka.streams.kstream.internals.SessionWindow; import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.InternalMockProcessorContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.StreamsTestUtils; @@ -58,8 +58,8 @@ import static org.apache.kafka.common.utils.Utils.toList; import static org.apache.kafka.test.StreamsTestUtils.valuesToSet; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -709,7 +709,7 @@ public void shouldReturnSameResultsForSingleKeyFindSessionsAndEqualKeyRangeFindS } @Test - public void shouldLogAndMeasureExpiredRecords() { + public void shouldLogAndMeasureExpiredRecords() throws InterruptedException { final Properties streamsConfig = StreamsTestUtils.getStreamsConfig(); final SessionStore sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long()); final InternalMockProcessorContext context = new InternalMockProcessorContext( @@ -722,7 +722,10 @@ public void shouldLogAndMeasureExpiredRecords() { context.setSystemTimeMs(time.milliseconds()); sessionStore.init((StateStoreContext) context, sessionStore); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureExpiredRecords")) { + logCaptureContext.setLatch(2); + // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired // Note that rocksdb will only expire segments at a time (where segment interval = 60,000 for this retention period) sessionStore.put(new Windowed<>("initial record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L); @@ -731,8 +734,8 @@ public void shouldLogAndMeasureExpiredRecords() { sessionStore.put(new Windowed<>("late record", new SessionWindow(0, 0)), 0L); sessionStore.put(new Windowed<>("another on-time record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L); - final List messages = appender.getMessages(); - assertThat(messages, hasItem("Skipping record for expired segment.")); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("WARN Skipping record for expired segment. 
")); } final Map metrics = context.metrics().metrics(); @@ -790,23 +793,26 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { } @Test - public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { + public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { final String keyFrom = Serdes.String().deserializer() .deserialize("", Serdes.Integer().serializer().serialize("", -1)); final String keyTo = Serdes.String().deserializer() .deserialize("", Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(); - final KeyValueIterator, Long> iterator = sessionStore.findSessions(keyFrom, keyTo, 0L, 10L)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(1); + + final KeyValueIterator, Long> iterator = sessionStore.findSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java index e93f758c5cffc..164a4ab9617e3 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java @@ -34,12 +34,12 @@ import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.StateSerdes; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.WindowStoreIterator; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.MockRecordCollector; import org.apache.kafka.test.StreamsTestUtils; import org.apache.kafka.test.TestUtils; @@ -968,17 +968,18 @@ public void shouldReturnSameResultsForSingleKeyFetchAndEqualKeyRangeFetch() { @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(); - final KeyValueIterator, String> iterator = windowStore.fetch(-1, 1, 0L, 10L)) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + final KeyValueIterator, String> iterator = windowStore.fetch(-1, 1, 0L, 10L); 
assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } @@ -998,7 +999,8 @@ public void shouldLogAndMeasureExpiredRecords() { context.setTime(1L); windowStore.init((StateStoreContext) context, windowStore); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogAndMeasureExpiredRecords")) { // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired windowStore.put(1, "initial record", 2 * RETENTION_PERIOD); @@ -1006,8 +1008,8 @@ public void shouldLogAndMeasureExpiredRecords() { windowStore.put(1, "late record", 0L); windowStore.put(1, "another on-time record", RETENTION_PERIOD + 1); - final List messages = appender.getMessages(); - assertThat(messages, hasItem("Skipping record for expired segment.")); + final List messages = logCaptureContext.getMessages(); + assertThat(messages, hasItem("WARN Skipping record for expired segment. ")); } final Map metrics = context.metrics().metrics(); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java index d5aa667c0c560..01ffb04736134 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java @@ -35,10 +35,10 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.InternalMockProcessorContext; import org.apache.kafka.test.TestUtils; import org.easymock.EasyMock; @@ -803,43 +803,50 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { } @Test - public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() { + public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() throws InterruptedException { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); - final KeyValueIterator, byte[]> iterator = cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L)) { + try (final LogCaptureContext logCaptureContext = 
LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey")) { + logCaptureContext.setLatch(2); + + final KeyValueIterator, byte[]> iterator = cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, hasItem( - "Returning empty iterator for fetch with invalid key range: from > to." + + "WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers" + " Note that the built-in numerical serdes do not follow this for negative numbers " ) ); } } @Test - public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(2); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); - final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L)) { + final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + + final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." 
+ - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java index 50fd88a276954..f3384a415aa19 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java @@ -34,11 +34,11 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.SessionStore; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.TestUtils; import org.easymock.EasyMock; import org.junit.After; @@ -814,23 +814,27 @@ public void shouldThrowNullPointerExceptionOnPutNullKey() { } @Test - public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() { + public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() throws InterruptedException { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); - final KeyValueIterator, byte[]> iterator = - cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey")) { + logCaptureContext.setLatch(2); + + final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, hasItem( - "Returning empty iterator for fetch with invalid key range: from > to." + + "WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." 
+ - " Note that the built-in numerical serdes do not follow this for negative numbers" + " Note that the built-in numerical serdes do not follow this for negative numbers " ) ); } @@ -841,18 +845,19 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingSessionStore.class); - final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L)) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, hasItem( - "Returning empty iterator for fetch with invalid key range: from > to." + + "WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers" + " Note that the built-in numerical serdes do not follow this for negative numbers " ) ); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java index 83136c33e8175..ebbd5be893142 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java @@ -38,13 +38,13 @@ import org.apache.kafka.streams.processor.StateStoreContext; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.StoreBuilder; import org.apache.kafka.streams.state.Stores; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.state.WindowStoreIterator; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.InternalMockProcessorContext; import org.apache.kafka.test.TestUtils; import org.easymock.EasyMock; @@ -997,41 +997,47 @@ public void shouldThrowNullPointerExceptionOnFetchNullKey() { } @Test - public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); - final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(2); - try (final LogCaptureAppender appender = 
LogCaptureAppender.createAndRegister(CachingWindowStore.class); - final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { + final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); + final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); + + final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + - " This may be due to range arguments set in the wrong order, " + - "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + + " This may be due to range arguments set in the wrong order," + + " or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } @Test - public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() { + public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() throws InterruptedException { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(CachingWindowStore.class); - final KeyValueIterator, byte[]> iterator = - cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey")) { + logCaptureContext.setLatch(2); + + final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L)); assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + logCaptureContext.await(); assertThat( - messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + logCaptureContext.getMessages(), + hasItem("WARN Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes." 
+ - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers ") ); } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java index 4cafecfa2c3ab..72afac31f5cc1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.streams.state.internals; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.test.LogCaptureContext; import org.easymock.EasyMockRunner; import org.junit.Test; import org.junit.runner.RunWith; @@ -54,20 +54,20 @@ import java.lang.reflect.Method; import java.util.Arrays; -import java.util.Set; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.easymock.EasyMock.mock; import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.reset; import static org.easymock.EasyMock.verify; -import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.matchesPattern; import static org.junit.Assert.fail; @@ -330,9 +330,11 @@ public String name() { @Test public void shouldLogWarningWhenSettingWalOptions() throws Exception { - - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class)) { - + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldLogWarningWhenSettingWalOptions", + Collections.singletonMap(RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class.getName(), "WARN") + )) { + logCaptureContext.setLatch(16); final RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter adapter = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(new DBOptions(), new ColumnFamilyOptions()); @@ -344,13 +346,10 @@ public void shouldLogWarningWhenSettingWalOptions() throws Exception { final List walOptions = Arrays.asList("walDir", "walFilter", "walRecoveryMode", "walBytesPerSync", "walSizeLimitMB", "manualWalFlush", "maxTotalWalSize", "walTtlSeconds"); - final Set logMessages = appender.getEvents().stream() - .filter(e -> e.getLevel().equals("WARN")) - .map(LogCaptureAppender.Event::getMessage) - .collect(Collectors.toSet()); - - walOptions.forEach(option -> assertThat(logMessages, hasItem(String.format("WAL is explicitly disabled by Streams in RocksDB. Setting option '%s' will be ignored", option)))); - + logCaptureContext.await(); + walOptions.forEach(option -> assertThat(logCaptureContext.getMessages(), + hasItem(containsString(String.format("WARN WAL is explicitly disabled by Streams in RocksDB. 
Setting option '%s' will be ignored", option)))) + ); } } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBSessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBSessionStoreTest.java index deabea85960d3..cef273659c500 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBSessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBSessionStoreTest.java @@ -85,4 +85,4 @@ public void shouldMatchPositionAfterPut() { final Position actual = rocksDBSessionStore.getPosition(); assertEquals(expected, actual); } -} \ No newline at end of file +} diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java index a1d511ae1ca71..40a65cc178c63 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java @@ -21,8 +21,8 @@ import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.processor.StateStoreContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.state.KeyValueIterator; +import org.apache.kafka.test.LogCaptureContext; import org.hamcrest.core.IsNull; import org.junit.Test; import org.rocksdb.ColumnFamilyDescriptor; @@ -52,72 +52,79 @@ RocksDBStore getRocksDBStore() { } @Test - public void shouldOpenNewStoreInRegularMode() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { + public void shouldOpenNewStoreInRegularMode() throws InterruptedException { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldOpenNewStoreInRegularMode")) { + logCaptureContext.setLatch(2); rocksDBStore.init((StateStoreContext) context, rocksDBStore); - assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); - } + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("INFO Opening store " + DB_NAME + " in regular mode ")); - try (final KeyValueIterator iterator = rocksDBStore.all()) { - assertThat(iterator.hasNext(), is(false)); + try (final KeyValueIterator iterator = rocksDBStore.all()) { + assertThat(iterator.hasNext(), is(false)); + } } } @Test public void shouldOpenExistingStoreInRegularMode() throws Exception { - // prepare store - rocksDBStore.init((StateStoreContext) context, rocksDBStore); - rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes()); - rocksDBStore.close(); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + this.getClass().getName() + "#shouldOpenExistingStoreInRegularMode")) { + logCaptureContext.setLatch(3); - // re-open store - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { + // prepare store rocksDBStore.init((StateStoreContext) context, rocksDBStore); - - assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); - } finally { + rocksDBStore.put(new Bytes("key".getBytes()), "timestamped".getBytes()); rocksDBStore.close(); - } - - // verify store - final DBOptions dbOptions = new DBOptions(); - final 
ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); - - final List columnFamilyDescriptors = asList( - new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), - new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions)); - final List columnFamilies = new ArrayList<>(columnFamilyDescriptors.size()); - RocksDB db = null; - ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null; - try { - db = RocksDB.open( - dbOptions, - new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), - columnFamilyDescriptors, - columnFamilies); + // re-open store + rocksDBStore = getRocksDBStore(); + rocksDBStore.init((StateStoreContext) context, rocksDBStore); - noTimestampColumnFamily = columnFamilies.get(0); - withTimestampColumnFamily = columnFamilies.get(1); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("INFO Opening store " + DB_NAME + " in regular mode ")); + rocksDBStore.close(); - assertThat(db.get(noTimestampColumnFamily, "key".getBytes()), new IsNull<>()); - assertThat(db.getLongProperty(noTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(0L)); - assertThat(db.get(withTimestampColumnFamily, "key".getBytes()).length, is(11)); - assertThat(db.getLongProperty(withTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(1L)); - } finally { - // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions - if (noTimestampColumnFamily != null) { - noTimestampColumnFamily.close(); - } - if (withTimestampColumnFamily != null) { - withTimestampColumnFamily.close(); - } - if (db != null) { - db.close(); + // verify store + final DBOptions dbOptions = new DBOptions(); + final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); + + final List columnFamilyDescriptors = asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions), + new ColumnFamilyDescriptor("keyValueWithTimestamp".getBytes(StandardCharsets.UTF_8), columnFamilyOptions)); + final List columnFamilies = new ArrayList<>(columnFamilyDescriptors.size()); + + RocksDB db = null; + ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null; + try { + db = RocksDB.open( + dbOptions, + new File(new File(context.stateDir(), "rocksdb"), DB_NAME).getAbsolutePath(), + columnFamilyDescriptors, + columnFamilies); + + noTimestampColumnFamily = columnFamilies.get(0); + withTimestampColumnFamily = columnFamilies.get(1); + + assertThat(db.get(noTimestampColumnFamily, "key".getBytes()), new IsNull<>()); + assertThat(db.getLongProperty(noTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(0L)); + assertThat(db.get(withTimestampColumnFamily, "key".getBytes()).length, is(11)); + assertThat(db.getLongProperty(withTimestampColumnFamily, "rocksdb.estimate-num-keys"), is(1L)); + } finally { + // Order of closing must follow: ColumnFamilyHandle > RocksDB > DBOptions > ColumnFamilyOptions + if (noTimestampColumnFamily != null) { + noTimestampColumnFamily.close(); + } + if (withTimestampColumnFamily != null) { + withTimestampColumnFamily.close(); + } + if (db != null) { + db.close(); + } + dbOptions.close(); + columnFamilyOptions.close(); } - dbOptions.close(); - columnFamilyOptions.close(); } } @@ -125,89 +132,93 @@ public void shouldOpenExistingStoreInRegularMode() throws Exception { public void shouldMigrateDataFromDefaultToTimestampColumnFamily() throws Exception { prepareOldStore(); - try 
(final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldMigrateDataFromDefaultToTimestampColumnFamily")) { + logCaptureContext.setLatch(3); - assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode")); - } + rocksDBStore.init((StateStoreContext) context, rocksDBStore); - // approx: 7 entries on old CF, 0 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(7L)); - - // get() - - // should be no-op on both CF - assertThat(rocksDBStore.get(new Bytes("unknown".getBytes())), new IsNull<>()); - // approx: 7 entries on old CF, 0 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(7L)); - - // should migrate key1 from old to new CF - // must return timestamp plus value, ie, it's not 1 byte but 9 bytes - assertThat(rocksDBStore.get(new Bytes("key1".getBytes())).length, is(8 + 1)); - // one delete on old CF, one put on new CF - // approx: 6 entries on old CF, 1 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(7L)); - - // put() - - // should migrate key2 from old to new CF with new value - rocksDBStore.put(new Bytes("key2".getBytes()), "timestamp+22".getBytes()); - // one delete on old CF, one put on new CF - // approx: 5 entries on old CF, 2 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(7L)); - - // should delete key3 from old and new CF - rocksDBStore.put(new Bytes("key3".getBytes()), null); - // count is off by one, due to two delete operations (even if one does not delete anything) - // approx: 4 entries on old CF, 1 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(5L)); - - // should add new key8 to new CF - rocksDBStore.put(new Bytes("key8".getBytes()), "timestamp+88888888".getBytes()); - // one delete on old CF, one put on new CF - // approx: 3 entries on old CF, 2 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(5L)); - - // putIfAbsent() - - // should migrate key4 from old to new CF with old value - assertThat(rocksDBStore.putIfAbsent(new Bytes("key4".getBytes()), "timestamp+4444".getBytes()).length, is(8 + 4)); - // one delete on old CF, one put on new CF - // approx: 2 entries on old CF, 3 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(5L)); - - // should add new key11 to new CF - assertThat(rocksDBStore.putIfAbsent(new Bytes("key11".getBytes()), "timestamp+11111111111".getBytes()), new IsNull<>()); - // one delete on old CF, one put on new CF - // approx: 1 entries on old CF, 4 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(5L)); - - // should not delete key5 but migrate to new CF - assertThat(rocksDBStore.putIfAbsent(new Bytes("key5".getBytes()), null).length, is(8 + 5)); - // one delete on old CF, one put on new CF - // approx: 0 entries on old CF, 5 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(5L)); - - // should be no-op on both CF - assertThat(rocksDBStore.putIfAbsent(new Bytes("key12".getBytes()), null), new IsNull<>()); - // two delete operation, however, only one is counted because old CF count was zero before already - // approx: 0 entries on old CF, 4 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(4L)); - - // delete() - - // should delete key6 from old and new CF - assertThat(rocksDBStore.delete(new Bytes("key6".getBytes())).length, is(8 + 6)); - // two delete 
operation, however, only one is counted because old CF count was zero before already - // approx: 0 entries on old CF, 3 in new CF - assertThat(rocksDBStore.approximateNumEntries(), is(3L)); - - iteratorsShouldNotMigrateData(); - assertThat(rocksDBStore.approximateNumEntries(), is(3L)); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("INFO Opening store " + DB_NAME + " in upgrade mode ")); + + // approx: 7 entries on old CF, 0 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(7L)); + + // get() + + // should be no-op on both CF + assertThat(rocksDBStore.get(new Bytes("unknown".getBytes())), new IsNull<>()); + // approx: 7 entries on old CF, 0 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(7L)); + + // should migrate key1 from old to new CF + // must return timestamp plus value, ie, it's not 1 byte but 9 bytes + assertThat(rocksDBStore.get(new Bytes("key1".getBytes())).length, is(8 + 1)); + // one delete on old CF, one put on new CF + // approx: 6 entries on old CF, 1 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(7L)); + + // put() + + // should migrate key2 from old to new CF with new value + rocksDBStore.put(new Bytes("key2".getBytes()), "timestamp+22".getBytes()); + // one delete on old CF, one put on new CF + // approx: 5 entries on old CF, 2 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(7L)); + + // should delete key3 from old and new CF + rocksDBStore.put(new Bytes("key3".getBytes()), null); + // count is off by one, due to two delete operations (even if one does not delete anything) + // approx: 4 entries on old CF, 1 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(5L)); + + // should add new key8 to new CF + rocksDBStore.put(new Bytes("key8".getBytes()), "timestamp+88888888".getBytes()); + // one delete on old CF, one put on new CF + // approx: 3 entries on old CF, 2 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(5L)); + + // putIfAbsent() + + // should migrate key4 from old to new CF with old value + assertThat(rocksDBStore.putIfAbsent(new Bytes("key4".getBytes()), "timestamp+4444".getBytes()).length, is(8 + 4)); + // one delete on old CF, one put on new CF + // approx: 2 entries on old CF, 3 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(5L)); + + // should add new key11 to new CF + assertThat(rocksDBStore.putIfAbsent(new Bytes("key11".getBytes()), "timestamp+11111111111".getBytes()), new IsNull<>()); + // one delete on old CF, one put on new CF + // approx: 1 entries on old CF, 4 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(5L)); + + // should not delete key5 but migrate to new CF + assertThat(rocksDBStore.putIfAbsent(new Bytes("key5".getBytes()), null).length, is(8 + 5)); + // one delete on old CF, one put on new CF + // approx: 0 entries on old CF, 5 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(5L)); + + // should be no-op on both CF + assertThat(rocksDBStore.putIfAbsent(new Bytes("key12".getBytes()), null), new IsNull<>()); + // two delete operation, however, only one is counted because old CF count was zero before already + // approx: 0 entries on old CF, 4 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(4L)); + + // delete() + + // should delete key6 from old and new CF + assertThat(rocksDBStore.delete(new Bytes("key6".getBytes())).length, is(8 + 6)); + // two delete operation, however, only one is counted because old CF count was zero before already + // approx: 0 entries 
on old CF, 3 in new CF + assertThat(rocksDBStore.approximateNumEntries(), is(3L)); + + iteratorsShouldNotMigrateData(); + assertThat(rocksDBStore.approximateNumEntries(), is(3L)); - rocksDBStore.close(); + rocksDBStore.close(); - verifyOldAndNewColumnFamily(); + verifyOldAndNewColumnFamily(logCaptureContext); + } } private void iteratorsShouldNotMigrateData() { @@ -358,7 +369,7 @@ private void iteratorsShouldNotMigrateData() { } } - private void verifyOldAndNewColumnFamily() throws Exception { + private void verifyOldAndNewColumnFamily(final LogCaptureContext logCaptureContext) throws Exception { final DBOptions dbOptions = new DBOptions(); final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); @@ -370,6 +381,8 @@ private void verifyOldAndNewColumnFamily() throws Exception { RocksDB db = null; ColumnFamilyHandle noTimestampColumnFamily = null, withTimestampColumnFamily = null; boolean errorOccurred = false; + + logCaptureContext.setLatch(2); try { db = RocksDB.open( dbOptions, @@ -423,18 +436,17 @@ private void verifyOldAndNewColumnFamily() throws Exception { } // check that still in upgrade mode - try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); - - assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in upgrade mode")); - } finally { - rocksDBStore.close(); - } + rocksDBStore.init((StateStoreContext) context, rocksDBStore); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("INFO Opening store " + DB_NAME + " in upgrade mode ")); + rocksDBStore.close(); // clear old CF columnFamilies.clear(); db = null; noTimestampColumnFamily = null; + + logCaptureContext.setLatch(2); try { db = RocksDB.open( dbOptions, @@ -457,11 +469,9 @@ private void verifyOldAndNewColumnFamily() throws Exception { } // check that still in regular mode - try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister(RocksDBTimestampedStore.class)) { - rocksDBStore.init((StateStoreContext) context, rocksDBStore); - - assertThat(appender.getMessages(), hasItem("Opening store " + DB_NAME + " in regular mode")); - } + rocksDBStore.init((StateStoreContext) context, rocksDBStore); + logCaptureContext.await(); + assertThat(logCaptureContext.getMessages(), hasItem("INFO Opening store " + DB_NAME + " in regular mode ")); } private void prepareOldStore() { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java index 697e3c6f510d4..e0802561b4108 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java @@ -40,7 +40,6 @@ import org.apache.kafka.streams.processor.api.Record; import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; -import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; import org.apache.kafka.streams.query.Position; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.StoreBuilder; @@ -52,6 +51,7 @@ import org.apache.kafka.streams.state.internals.PrefixedWindowKeySchemas.KeyFirstWindowKeySchema; 
import org.apache.kafka.streams.state.internals.PrefixedWindowKeySchemas.TimeFirstWindowKeySchema; import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.LogCaptureContext; import org.apache.kafka.test.TestUtils; import org.easymock.EasyMock; import org.junit.After; @@ -1130,17 +1130,18 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey"); final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + hasItem(containsString("Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to range arguments set in the wrong order, " + "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers")) ); } } @@ -1150,17 +1151,18 @@ public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + + "#shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey"); final KeyValueIterator, byte[]> iterator = cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) { assertFalse(iterator.hasNext()); - final List messages = appender.getMessages(); + final List messages = logCaptureContext.getMessages(); assertThat( messages, - hasItem("Returning empty iterator for fetch with invalid key range: from > to." + + hasItem(containsString("Returning empty iterator for fetch with invalid key range: from > to." + " This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes." + - " Note that the built-in numerical serdes do not follow this for negative numbers") + " Note that the built-in numerical serdes do not follow this for negative numbers")) ); } } diff --git a/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java new file mode 100644 index 0000000000000..6849e4bd50628 --- /dev/null +++ b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.core.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+import org.apache.logging.log4j.test.appender.ListAppender;
+
+public class LogCaptureContext implements AutoCloseable {
+    private final ListAppender listAppender;
+    private final Map<String, Level> prevLevelMap = new HashMap<>();
+
+    public static LogCaptureContext create(final String name) {
+        return create(name, new HashMap<>());
+    }
+
+    public static LogCaptureContext create(final String name, final Map<String, String> levelMap) {
+        return new LogCaptureContext(name, levelMap);
+    }
+
+    private LogCaptureContext(final String name, final Map<String, String> levelMap) {
+        final LoggerContext loggerContext = LoggerContext.getContext(false);
+        listAppender = ListAppender.createAppender(name, false, false,
+            PatternLayout.newBuilder().withPattern("%p %m %throwable").build(), null);
+        listAppender.start();
+        loggerContext.getConfiguration().addAppender(listAppender);
+        loggerContext.getRootLogger().addAppender(listAppender);
+
+        for (final String loggerName : levelMap.keySet()) {
+            final Logger logger = loggerContext.getLogger(loggerName);
+
+            // Store the previous logger level
+            this.prevLevelMap.put(loggerName, logger.getLevel());
+
+            // Change the logger level
+            logger.setLevel(Level.getLevel(levelMap.get(loggerName)));
+        }
+    }
+
+    /**
+     * Set the expected number of events.
+     *
+     * @param size number of expected logging events
+     */
+    public void setLatch(final int size) {
+        this.listAppender.countDownLatch = new CountDownLatch(size);
+    }
+
+    /**
+     * Wait for the appender to finish processing the expected number of events.
+     *
+     * @throws InterruptedException
+     */
+    public void await() throws InterruptedException {
+        await(10, TimeUnit.SECONDS);
+    }
+
+    /**
+     * Wait for the appender to finish processing the expected number of events.
+     *
+     * @throws InterruptedException
+     */
+    public void await(final long l, final TimeUnit timeUnit) throws InterruptedException {
+        this.listAppender.countDownLatch.await(l, timeUnit);
+    }
+
+    /**
+     * Returns the appended log messages.
+     *
+     * @return appended log messages
+     */
+    public List<String> getMessages() {
+        return listAppender.getMessages();
+    }
+
+    @Override
+    public void close() {
+        final LoggerContext loggerContext = LoggerContext.getContext(false);
+        loggerContext.getRootLogger().removeAppender(listAppender);
+        listAppender.stop();
+
+        for (final String loggerName : this.prevLevelMap.keySet()) {
+            final Logger logger = loggerContext.getLogger(loggerName);
+
+            // Restore previous logger level
+            logger.setLevel(this.prevLevelMap.get(loggerName));
+        }
+    }
+}
diff --git a/streams/src/test/resources/log4j.properties b/streams/src/test/resources/log4j.properties
deleted file mode 100644
index 050cd679f06e7..0000000000000
--- a/streams/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-log4j.rootLogger=INFO, stdout
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
-
-log4j.logger.kafka=ERROR
-log4j.logger.state.change.logger=ERROR
-log4j.logger.org.apache.kafka=ERROR
-log4j.logger.org.apache.zookeeper=ERROR
-
-# printing out the configs takes up a huge amount of the allotted characters,
-# and provides little value as we can always figure out the test configs without the logs
-log4j.logger.org.apache.kafka.clients.producer.ProducerConfig=ERROR
-log4j.logger.org.apache.kafka.clients.consumer.ConsumerConfig=ERROR
-log4j.logger.org.apache.kafka.clients.admin.AdminClientConfig=ERROR
-log4j.logger.org.apache.kafka.streams.StreamsConfig=ERROR
-
-# These are the only logs we will likely ever find anything useful in to debug Streams test failures
-log4j.logger.org.apache.kafka.clients=INFO
-log4j.logger.org.apache.kafka.streams=INFO
diff --git a/streams/src/test/resources/log4j2.properties b/streams/src/test/resources/log4j2.properties
new file mode 100644
index 0000000000000..8fd8a3e425035
--- /dev/null
+++ b/streams/src/test/resources/log4j2.properties
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
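For reviewers who want to see the new helper in isolation, the following is a minimal, hypothetical usage sketch and is not part of the patch. SampleComponentTest, SampleComponent, and the "Ignoring null input" message are invented for illustration; the create/setLatch/await/getMessages calls and the "%p %m %throwable" layout (which is why the expected strings above carry a level prefix and a trailing space) follow the LogCaptureContext class added here.

import java.util.Collections;
import java.util.List;

import org.apache.kafka.test.LogCaptureContext;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.Test;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasItem;

public class SampleComponentTest {

    // Hypothetical component used only to produce a log event.
    static class SampleComponent {
        private static final Logger LOG = LogManager.getLogger(SampleComponent.class);

        void process(final Object input) {
            if (input == null) {
                LOG.warn("Ignoring null input");
            }
        }
    }

    @Test
    public void shouldCaptureWarning() throws InterruptedException {
        // The second argument temporarily overrides the named logger's level,
        // mirroring the RocksDB adapter test above; close() restores it.
        try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(
                this.getClass().getName() + "#shouldCaptureWarning",
                Collections.singletonMap(SampleComponent.class.getName(), "WARN"))) {
            logCaptureContext.setLatch(1); // expect exactly one log event

            new SampleComponent().process(null);

            // Block until the appender has seen the expected event, so the assertion
            // does not race against the logging call.
            logCaptureContext.await();

            // "%p %m %throwable" renders the level, the message, and a trailing space
            // when no throwable is attached.
            final List<String> messages = logCaptureContext.getMessages();
            assertThat(messages, hasItem("WARN Ignoring null input "));
        }
    }
}

The latch-and-await step is what lets the migrated tests above drop the old LogCaptureAppender without becoming flaky.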
+# See the License for the specific language governing permissions and +# limitations under the License. +name=TestConfig +appenders=console + +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=[%d] %p %m (%c:%L)%n + +rootLogger.level=INFO +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +loggers=kafka,org.apache.zookeeper,org.apache.kafka.clients.producer.ProducerConfig,org.apache.kafka.clients.consumer.ConsumerConfig,org.apache.kafka.clients.admin.AdminClientConfig,org.apache.kafka.clients,org.apache.kafka.streams.StreamsConfig,org.apache.kafka.streams,org.apache.kafka,state.change.logger + +logger.kafka.name=kafka +logger.kafka.level=ERROR + +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=ERROR + +# printing out the configs takes up a huge amount of the allotted characters, +# and provides little value as we can always figure out the test configs without the logs + +logger.org.apache.kafka.clients.producer.ProducerConfig.name=org.apache.kafka.clients.producer.ProducerConfig +logger.org.apache.kafka.clients.producer.ProducerConfig.level=ERROR + +logger.org.apache.kafka.clients.consumer.ConsumerConfig.name=org.apache.kafka.clients.consumer.ConsumerConfig +logger.org.apache.kafka.clients.consumer.ConsumerConfig.level=ERROR + +logger.org.apache.kafka.clients.admin.AdminClientConfig.name=org.apache.kafka.clients.admin.AdminClientConfig +logger.org.apache.kafka.clients.admin.AdminClientConfig.level=ERROR + +logger.org.apache.kafka.streams.StreamsConfig.name=org.apache.kafka.streams.StreamsConfig +logger.org.apache.kafka.streams.StreamsConfig.level=WARN + +# These are the only logs we will likely ever find anything useful in to debug Streams test failures +logger.org.apache.kafka.clients.name=org.apache.kafka.clients +logger.org.apache.kafka.clients.level=INFO + +logger.org.apache.kafka.streams.name=org.apache.kafka.streams +logger.org.apache.kafka.streams.level=INFO + +logger.org.apache.kafka.name=org.apache.kafka +logger.org.apache.kafka.level=ERROR + +logger.state.change.logger.name=state.change.logger +logger.state.change.logger.level=ERROR + diff --git a/streams/test-utils/src/test/resources/log4j.properties b/streams/test-utils/src/test/resources/log4j.properties deleted file mode 100644 index be36f90299a77..0000000000000 --- a/streams/test-utils/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-log4j.rootLogger=INFO, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n - -log4j.logger.org.apache.kafka=INFO diff --git a/metadata/src/test/resources/log4j.properties b/streams/test-utils/src/test/resources/log4j2.properties similarity index 57% rename from metadata/src/test/resources/log4j.properties rename to streams/test-utils/src/test/resources/log4j2.properties index db3879386f10f..691a13e25819e 100644 --- a/metadata/src/test/resources/log4j.properties +++ b/streams/test-utils/src/test/resources/log4j2.properties @@ -12,11 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -log4j.rootLogger=DEBUG, stdout +name=TestConfig +status = INFO -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n +appenders = console + +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=[%d] %p %m (%c:%L)%n + +rootLogger.level = info +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT + +loggers=org.apache.kafka.raft,org.apache.kafka.snapshot + +logger.org.apache.kafka.raft.name=org.apache.kafka.raft +logger.org.apache.kafka.raft.level=ERROR + +logger.org.apache.kafka.snapshot.name=org.apache.kafka.snapshot +logger.org.apache.kafka.snapshot.level=ERROR -log4j.logger.org.apache.kafka=DEBUG -log4j.logger.org.apache.zookeeper=WARN diff --git a/tests/kafkatest/services/connect.py b/tests/kafkatest/services/connect.py index 41c33ccb9e102..19415d92b3a8d 100644 --- a/tests/kafkatest/services/connect.py +++ b/tests/kafkatest/services/connect.py @@ -38,7 +38,7 @@ class ConnectServiceBase(KafkaPathResolverMixin, Service): LOG_FILE = os.path.join(PERSISTENT_ROOT, "connect.log") STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "connect.stdout") STDERR_FILE = os.path.join(PERSISTENT_ROOT, "connect.stderr") - LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect-log4j.properties") + LOG4J2_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect-log4j2.properties") PID_FILE = os.path.join(PERSISTENT_ROOT, "connect.pid") EXTERNAL_CONFIGS_FILE = os.path.join(PERSISTENT_ROOT, "connect-external-configs.properties") CONNECT_REST_PORT = 8083 @@ -317,7 +317,7 @@ def node(self): return self.nodes[0] def start_cmd(self, node, connector_configs): - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE + cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:%s\"; " % self.LOG4J2_CONFIG_FILE heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["connect_heap_dump_file"]["path"] other_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -341,7 +341,7 @@ def start_node(self, node): if self.external_config_template_func: node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node)) node.account.create_file(self.CONFIG_FILE, self.config_template_func(node)) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(self.LOG4J2_CONFIG_FILE, self.render('connect_log4j2.properties', log_file=self.LOG_FILE)) 
remote_connector_configs = [] for idx, template in enumerate(self.connector_config_templates): target_file = os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties") @@ -377,7 +377,7 @@ def __init__(self, context, num_nodes, kafka, files, offsets_topic="connect-offs # connector_configs argument is intentionally ignored in distributed service. def start_cmd(self, node, connector_configs): - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE + cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:%s\"; " % self.LOG4J2_CONFIG_FILE heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["connect_heap_dump_file"]["path"] other_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -398,7 +398,7 @@ def start_node(self, node): if self.external_config_template_func: node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node)) node.account.create_file(self.CONFIG_FILE, self.config_template_func(node)) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(self.LOG4J2_CONFIG_FILE, self.render('connect_log4j2.properties', log_file=self.LOG_FILE)) if self.connector_config_templates: raise DucktapeError("Config files are not valid in distributed mode, submit connectors via the REST API") diff --git a/tests/kafkatest/services/kafka/kafka.py b/tests/kafkatest/services/kafka/kafka.py index 55b5b7b87141b..d3e69648c37b8 100644 --- a/tests/kafkatest/services/kafka/kafka.py +++ b/tests/kafkatest/services/kafka/kafka.py @@ -144,7 +144,7 @@ class for details. """ PERSISTENT_ROOT = "/mnt/kafka" STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log") - LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties") + LOG4J2_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j2.properties") # Logs such as controller.log, server.log, etc all go here OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs") OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info") @@ -744,7 +744,7 @@ def render_configs(self, configs): def start_cmd(self, node): cmd = "export JMX_PORT=%d; " % self.jmx_port - cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG + cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:%s\"; " % self.LOG4J2_CONFIG heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \ self.logs["kafka_heap_dump_file"]["path"] security_kafka_opts = self.security_config.kafka_opts.strip('\"') @@ -807,7 +807,7 @@ def start_node(self, node, timeout_sec=60): self.logger.info("kafka.properties:") self.logger.info(prop_file) node.account.create_file(KafkaService.CONFIG_FILE, prop_file) - node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR)) + node.account.create_file(self.LOG4J2_CONFIG, self.render('log4j2.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR)) if self.quorum_info.using_kraft: # format log directories if necessary diff --git a/tests/kafkatest/services/kafka/templates/log4j.properties b/tests/kafkatest/services/kafka/templates/log4j.properties deleted file mode 100644 index 5963c39c089df..0000000000000 --- a/tests/kafkatest/services/kafka/templates/log4j.properties +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# 
contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger={{ log_level|default("DEBUG") }}, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n - -# INFO level appenders -log4j.appender.kafkaInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.kafkaInfoAppender.File={{ log_dir }}/info/server.log -log4j.appender.kafkaInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.kafkaInfoAppender.Threshold=INFO - -log4j.appender.stateChangeInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.stateChangeInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.stateChangeInfoAppender.File={{ log_dir }}/info/state-change.log -log4j.appender.stateChangeInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.stateChangeInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.stateChangeInfoAppender.Threshold=INFO - -log4j.appender.requestInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.requestInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.requestInfoAppender.File={{ log_dir }}/info/kafka-request.log -log4j.appender.requestInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.requestInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.requestInfoAppender.Threshold=INFO - -log4j.appender.cleanerInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.cleanerInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.cleanerInfoAppender.File={{ log_dir }}/info/log-cleaner.log -log4j.appender.cleanerInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.cleanerInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.cleanerInfoAppender.Threshold=INFO - -log4j.appender.controllerInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.controllerInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.controllerInfoAppender.File={{ log_dir }}/info/controller.log -log4j.appender.controllerInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.controllerInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.controllerInfoAppender.Threshold=INFO - -log4j.appender.authorizerInfoAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.authorizerInfoAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.authorizerInfoAppender.File={{ log_dir }}/info/kafka-authorizer.log -log4j.appender.authorizerInfoAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.authorizerInfoAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 
-log4j.appender.authorizerInfoAppender.Threshold=INFO - -# DEBUG level appenders -log4j.appender.kafkaDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.kafkaDebugAppender.File={{ log_dir }}/debug/server.log -log4j.appender.kafkaDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.kafkaDebugAppender.Threshold=DEBUG - -log4j.appender.stateChangeDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.stateChangeDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.stateChangeDebugAppender.File={{ log_dir }}/debug/state-change.log -log4j.appender.stateChangeDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.stateChangeDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.stateChangeDebugAppender.Threshold=DEBUG - -log4j.appender.requestDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.requestDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.requestDebugAppender.File={{ log_dir }}/debug/kafka-request.log -log4j.appender.requestDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.requestDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.requestDebugAppender.Threshold=DEBUG - -log4j.appender.cleanerDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.cleanerDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.cleanerDebugAppender.File={{ log_dir }}/debug/log-cleaner.log -log4j.appender.cleanerDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.cleanerDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.cleanerDebugAppender.Threshold=DEBUG - -log4j.appender.controllerDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.controllerDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.controllerDebugAppender.File={{ log_dir }}/debug/controller.log -log4j.appender.controllerDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.controllerDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.controllerDebugAppender.Threshold=DEBUG - -log4j.appender.authorizerDebugAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.authorizerDebugAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.authorizerDebugAppender.File={{ log_dir }}/debug/kafka-authorizer.log -log4j.appender.authorizerDebugAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.authorizerDebugAppender.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.authorizerDebugAppender.Threshold=DEBUG - -# Turn on all our debugging info -log4j.logger.kafka.producer.async.DefaultEventHandler={{ log_level|default("DEBUG") }}, kafkaInfoAppender, kafkaDebugAppender -log4j.logger.kafka.client.ClientUtils={{ log_level|default("DEBUG") }}, kafkaInfoAppender, kafkaDebugAppender -log4j.logger.kafka.perf={{ log_level|default("DEBUG") }}, kafkaInfoAppender, kafkaDebugAppender -log4j.logger.kafka.perf.ProducerPerformance$ProducerThread={{ log_level|default("DEBUG") }}, kafkaInfoAppender, kafkaDebugAppender -log4j.logger.kafka={{ log_level|default("DEBUG") }}, kafkaInfoAppender, kafkaDebugAppender - -log4j.logger.kafka.network.RequestChannel$={{ log_level|default("DEBUG") }}, requestInfoAppender, requestDebugAppender -log4j.additivity.kafka.network.RequestChannel$=false - -log4j.logger.kafka.network.Processor={{ 
log_level|default("DEBUG") }}, requestInfoAppender, requestDebugAppender -log4j.logger.kafka.server.KafkaApis={{ log_level|default("DEBUG") }}, requestInfoAppender, requestDebugAppender -log4j.additivity.kafka.server.KafkaApis=false -log4j.logger.kafka.request.logger={{ log_level|default("DEBUG") }}, requestInfoAppender, requestDebugAppender -log4j.additivity.kafka.request.logger=false - -log4j.logger.kafka.controller={{ log_level|default("DEBUG") }}, controllerInfoAppender, controllerDebugAppender -log4j.additivity.kafka.controller=false - -log4j.logger.kafka.log.LogCleaner={{ log_level|default("DEBUG") }}, cleanerInfoAppender, cleanerDebugAppender -log4j.additivity.kafka.log.LogCleaner=false - -log4j.logger.state.change.logger={{ log_level|default("DEBUG") }}, stateChangeInfoAppender, stateChangeDebugAppender -log4j.additivity.state.change.logger=false - -#Change this to debug to get the actual audit log for authorizer. -log4j.logger.kafka.authorizer.logger={{ log_level|default("DEBUG") }}, authorizerInfoAppender, authorizerDebugAppender -log4j.additivity.kafka.authorizer.logger=false - diff --git a/tests/kafkatest/services/kafka/templates/log4j2.properties b/tests/kafkatest/services/kafka/templates/log4j2.properties new file mode 100644 index 0000000000000..d182c8afa8dd0 --- /dev/null +++ b/tests/kafkatest/services/kafka/templates/log4j2.properties @@ -0,0 +1,297 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
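+#
+# This Jinja2 template is rendered by the ducktape KafkaService (see kafka.py in this patch,
+# which calls self.render('log4j2.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR)), so
+# {{ log_dir }} and {{ log_level }} (defaulting to DEBUG) are substituted at render time.
+# Each broker component writes through a pair of RollingFile appenders with INFO and DEBUG
+# ThresholdFilters, mirroring the INFO/DEBUG DailyRollingFileAppender pairs of the removed
+# log4j.properties template.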
+name=TestConfig + +appenders=stdout,kafkaInfoAppender,requestInfoAppender,controllerInfoAppender,cleanerInfoAppender,stateChangeInfoAppender,authorizerInfoAppender,kafkaDebugAppender,requestDebugAppender,controllerDebugAppender,cleanerDebugAppender,stateChangeDebugAppender,authorizerDebugAppender + +appender.stdout.type=Console +appender.stdout.name=STDOUT +appender.stdout.layout.type=PatternLayout +appender.stdout.layout.pattern=[%d] %p %m (%c)%n + +# INFO level appenders +appender.kafkaInfoAppender.type=RollingFile +appender.kafkaInfoAppender.name=KAFKA_INFO_APPENDER +appender.kafkaInfoAppender.fileName={{ log_dir }}/info/server.log +appender.kafkaInfoAppender.filePattern={{ log_dir }}/info/server.log.%d{yyyy-MM-dd}.log.gz +appender.kafkaInfoAppender.layout.type=PatternLayout +appender.kafkaInfoAppender.layout.pattern=[%d] %p %m (%c)%n +appender.kafkaInfoAppender.filter.threshold.type=ThresholdFilter +appender.kafkaInfoAppender.filter.threshold.level=INFO +appender.kafkaInfoAppender.policies.type=Policies +appender.kafkaInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.kafkaInfoAppender.policies.time.interval=1 +appender.kafkaInfoAppender.policies.time.modulate=true +appender.kafkaInfoAppender.strategy.type=DefaultRolloverStrategy +appender.kafkaInfoAppender.strategy.max=1 + +appender.requestInfoAppender.type=RollingFile +appender.requestInfoAppender.name=REQUEST_INFO_APPENDER +appender.requestInfoAppender.fileName={{ log_dir }}/info/kafka-request.log +appender.requestInfoAppender.filePattern={{ log_dir }}/info/kafka-request.log.%d{yyyy-MM-dd}.log.gz +appender.requestInfoAppender.layout.type=PatternLayout +appender.requestInfoAppender.layout.pattern=[%d] %p %m (%c)%n +appender.requestInfoAppender.filter.threshold.type=ThresholdFilter +appender.requestInfoAppender.filter.threshold.level=INFO +appender.requestInfoAppender.policies.type=Policies +appender.requestInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.requestInfoAppender.policies.time.interval=1 +appender.requestInfoAppender.policies.time.modulate=true +appender.requestInfoAppender.strategy.type=DefaultRolloverStrategy +appender.requestInfoAppender.strategy.max=1 + +appender.controllerInfoAppender.type=RollingFile +appender.controllerInfoAppender.name=CONTROLLER_INFO_APPENDER +appender.controllerInfoAppender.fileName={{ log_dir }}/info/controller.log +appender.controllerInfoAppender.filePattern={{ log_dir }}/info/controller.log.%d{yyyy-MM-dd}.log.gz +appender.controllerInfoAppender.layout.type=PatternLayout +appender.controllerInfoAppender.layout.pattern=[%d] %p %m (%c)%n +appender.controllerInfoAppender.filter.threshold.type=ThresholdFilter +appender.controllerInfoAppender.filter.threshold.level=INFO +appender.controllerInfoAppender.policies.type=Policies +appender.controllerInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.controllerInfoAppender.policies.time.interval=1 +appender.controllerInfoAppender.policies.time.modulate=true +appender.controllerInfoAppender.strategy.type=DefaultRolloverStrategy +appender.controllerInfoAppender.strategy.max=1 + +appender.cleanerInfoAppender.type=RollingFile +appender.cleanerInfoAppender.name=CLEANER_INFO_APPENDER +appender.cleanerInfoAppender.fileName={{ log_dir }}/info/log-cleaner.log +appender.cleanerInfoAppender.filePattern={{ log_dir }}/info/log-cleaner.log.%d{yyyy-MM-dd}.log.gz +appender.cleanerInfoAppender.layout.type=PatternLayout +appender.cleanerInfoAppender.layout.pattern=[%d] %p %m (%c)%n 
+appender.cleanerInfoAppender.filter.threshold.type=ThresholdFilter +appender.cleanerInfoAppender.filter.threshold.level=INFO +appender.cleanerInfoAppender.policies.type=Policies +appender.cleanerInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.cleanerInfoAppender.policies.time.interval=1 +appender.cleanerInfoAppender.policies.time.modulate=true +appender.cleanerInfoAppender.strategy.type=DefaultRolloverStrategy +appender.cleanerInfoAppender.strategy.max=1 + +appender.stateChangeInfoAppender.type=RollingFile +appender.stateChangeInfoAppender.name=STATE_CHANGE_INFO_APPENDER +appender.stateChangeInfoAppender.fileName={{ log_dir }}/info/state-change.log +appender.stateChangeInfoAppender.filePattern={{ log_dir }}/info/state-change.log.%d{yyyy-MM-dd}.log.gz +appender.stateChangeInfoAppender.layout.type=PatternLayout +appender.stateChangeInfoAppender.layout.pattern=[%d] %p %m (%c)%n +appender.stateChangeInfoAppender.filter.threshold.type=ThresholdFilter +appender.stateChangeInfoAppender.filter.threshold.level=INFO +appender.stateChangeInfoAppender.policies.type=Policies +appender.stateChangeInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.stateChangeInfoAppender.policies.time.interval=1 +appender.stateChangeInfoAppender.policies.time.modulate=true +appender.stateChangeInfoAppender.strategy.type=DefaultRolloverStrategy +appender.stateChangeInfoAppender.strategy.max=1 + +appender.authorizerInfoAppender.type=RollingFile +appender.authorizerInfoAppender.name=AUTHORIZER_INFO_APPENDER +appender.authorizerInfoAppender.fileName={{ log_dir }}/info/kafka-authorizer.log +appender.authorizerInfoAppender.filePattern={{ log_dir }}/info/kafka-authorizer.log.%d{yyyy-MM-dd}.log.gz +appender.authorizerInfoAppender.layout.type=PatternLayout +appender.authorizerInfoAppender.layout.pattern=[%d] %p %m (%c)%n +appender.authorizerInfoAppender.filter.threshold.type=ThresholdFilter +appender.authorizerInfoAppender.filter.threshold.level=INFO +appender.authorizerInfoAppender.policies.type=Policies +appender.authorizerInfoAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.authorizerInfoAppender.policies.time.interval=1 +appender.authorizerInfoAppender.policies.time.modulate=true +appender.authorizerInfoAppender.strategy.type=DefaultRolloverStrategy +appender.authorizerInfoAppender.strategy.max=1 + +# DEBUG level appenders +appender.kafkaDebugAppender.type=RollingFile +appender.kafkaDebugAppender.name=KAFKA_DEBUG_APPENDER +appender.kafkaDebugAppender.fileName={{ log_dir }}/debug/server.log +appender.kafkaDebugAppender.filePattern={{ log_dir }}/debug/server.log.%d{yyyy-MM-dd}.log.gz +appender.kafkaDebugAppender.layout.type=PatternLayout +appender.kafkaDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.kafkaDebugAppender.filter.threshold.type=ThresholdFilter +appender.kafkaDebugAppender.filter.threshold.level=DEBUG +appender.kafkaDebugAppender.policies.type=Policies +appender.kafkaDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.kafkaDebugAppender.policies.time.interval=1 +appender.kafkaDebugAppender.policies.time.modulate=true +appender.kafkaDebugAppender.strategy.type=DefaultRolloverStrategy +appender.kafkaDebugAppender.strategy.max=1 + +appender.requestDebugAppender.type=RollingFile +appender.requestDebugAppender.name=REQUEST_DEBUG_APPENDER +appender.requestDebugAppender.fileName={{ log_dir }}/debug/kafka-request.log +appender.requestDebugAppender.filePattern={{ log_dir }}/debug/kafka-request.log.%d{yyyy-MM-dd}.log.gz 
+appender.requestDebugAppender.layout.type=PatternLayout +appender.requestDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.requestDebugAppender.filter.threshold.type=ThresholdFilter +appender.requestDebugAppender.filter.threshold.level=DEBUG +appender.requestDebugAppender.policies.type=Policies +appender.requestDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.requestDebugAppender.policies.time.interval=1 +appender.requestDebugAppender.policies.time.modulate=true +appender.requestDebugAppender.strategy.type=DefaultRolloverStrategy +appender.requestDebugAppender.strategy.max=1 + +appender.controllerDebugAppender.type=RollingFile +appender.controllerDebugAppender.name=CONTROLLER_DEBUG_APPENDER +appender.controllerDebugAppender.fileName={{ log_dir }}/debug/controller.log +appender.controllerDebugAppender.filePattern={{ log_dir }}/debug/controller.log.%d{yyyy-MM-dd}.log.gz +appender.controllerDebugAppender.layout.type=PatternLayout +appender.controllerDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.controllerDebugAppender.filter.threshold.type=ThresholdFilter +appender.controllerDebugAppender.filter.threshold.level=DEBUG +appender.controllerDebugAppender.policies.type=Policies +appender.controllerDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.controllerDebugAppender.policies.time.interval=1 +appender.controllerDebugAppender.policies.time.modulate=true +appender.controllerDebugAppender.strategy.type=DefaultRolloverStrategy +appender.controllerDebugAppender.strategy.max=1 + +appender.cleanerDebugAppender.type=RollingFile +appender.cleanerDebugAppender.name=CLEANER_DEBUG_APPENDER +appender.cleanerDebugAppender.fileName={{ log_dir }}/debug/log-cleaner.log +appender.cleanerDebugAppender.filePattern={{ log_dir }}/debug/log-cleaner.log.%d{yyyy-MM-dd}.log.gz +appender.cleanerDebugAppender.layout.type=PatternLayout +appender.cleanerDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.cleanerDebugAppender.filter.threshold.type=ThresholdFilter +appender.cleanerDebugAppender.filter.threshold.level=DEBUG +appender.cleanerDebugAppender.policies.type=Policies +appender.cleanerDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.cleanerDebugAppender.policies.time.interval=1 +appender.cleanerDebugAppender.policies.time.modulate=true +appender.cleanerDebugAppender.strategy.type=DefaultRolloverStrategy +appender.cleanerDebugAppender.strategy.max=1 + +appender.stateChangeDebugAppender.type=RollingFile +appender.stateChangeDebugAppender.name=STATE_CHANGE_DEBUG_APPENDER +appender.stateChangeDebugAppender.fileName={{ log_dir }}/debug/state-change.log +appender.stateChangeDebugAppender.filePattern={{ log_dir }}/debug/state-change.log.%d{yyyy-MM-dd}.log.gz +appender.stateChangeDebugAppender.layout.type=PatternLayout +appender.stateChangeDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.stateChangeDebugAppender.filter.threshold.type=ThresholdFilter +appender.stateChangeDebugAppender.filter.threshold.level=DEBUG +appender.stateChangeDebugAppender.policies.type=Policies +appender.stateChangeDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.stateChangeDebugAppender.policies.time.interval=1 +appender.stateChangeDebugAppender.policies.time.modulate=true +appender.stateChangeDebugAppender.strategy.type=DefaultRolloverStrategy +appender.stateChangeDebugAppender.strategy.max=1 + +appender.authorizerDebugAppender.type=RollingFile +appender.authorizerDebugAppender.name=AUTHORIZER_DEBUG_APPENDER 
+appender.authorizerDebugAppender.fileName={{ log_dir }}/debug/kafka-authorizer.log +appender.authorizerDebugAppender.filePattern={{ log_dir }}/debug/kafka-authorizer.log.%d{yyyy-MM-dd}.log.gz +appender.authorizerDebugAppender.layout.type=PatternLayout +appender.authorizerDebugAppender.layout.pattern=[%d] %p %m (%c)%n +appender.authorizerDebugAppender.filter.threshold.type=ThresholdFilter +appender.authorizerDebugAppender.filter.threshold.level=DEBUG +appender.authorizerDebugAppender.policies.type=Policies +appender.authorizerDebugAppender.policies.time.type=TimeBasedTriggeringPolicy +appender.authorizerDebugAppender.policies.time.interval=1 +appender.authorizerDebugAppender.policies.time.modulate=true +appender.authorizerDebugAppender.strategy.type=DefaultRolloverStrategy +appender.authorizerDebugAppender.strategy.max=1 + +rootLogger.level={{ log_level|default("DEBUG") }} +rootLogger.appenderRefs=stdout +rootLogger.appenderRef.stdout.ref=STDOUT + +# Turn on all our debugging info +loggers=state.change.logger,kafka.server.KafkaApis,kafka.request.logger,kafka.producer.async.DefaultEventHandler,kafka.perf.ProducerPerformance$ProducerThread,kafka.perf,kafka.network.RequestChannel$,kafka.network.Processor,kafka.log.LogCleaner,kafka.controller,kafka.client.ClientUtils,kafka.authorizer.logger,kafka + +logger.state.change.logger.name=state.change.logger +logger.state.change.logger.level={{ log_level|default("DEBUG") }} +logger.state.change.logger.additivity=false +logger.state.change.logger.appenderRefs=stateChangeInfoAppender,stateChangeDebugAppender +logger.state.change.logger.stateChangeInfoAppender.ref=STATE_CHANGE_INFO_APPENDER +logger.state.change.logger.stateChangeDebugAppender.ref=STATE_CHANGE_DEBUG_APPENDER + +logger.kafka.server.KafkaApis.name=kafka.server.KafkaApis +logger.kafka.server.KafkaApis.level={{ log_level|default("DEBUG") }} +logger.kafka.server.KafkaApis.additivity=false +logger.kafka.server.KafkaApis.appenderRefs=requestInfoAppender,requestDebugAppender +logger.kafka.server.KafkaApis.requestInfoAppender.ref=REQUEST_INFO_APPENDER +logger.kafka.server.KafkaApis.requestDebugAppender.ref=REQUEST_DEBUG_APPENDER + +logger.kafka.request.logger.name=kafka.request.logger +logger.kafka.request.logger.level={{ log_level|default("DEBUG") }} +logger.kafka.request.logger.additivity=false +logger.kafka.request.logger.appenderRefs=requestInfoAppender,requestDebugAppender +logger.kafka.request.logger.requestInfoAppender.ref=REQUEST_INFO_APPENDER +logger.kafka.request.logger.requestDebugAppender.ref=REQUEST_DEBUG_APPENDER + +logger.kafka.producer.async.DefaultEventHandler.name=kafka.producer.async.DefaultEventHandler +logger.kafka.producer.async.DefaultEventHandler.level={{ log_level|default("DEBUG") }} +logger.kafka.producer.async.DefaultEventHandler.appenderRefs=kafkaInfoAppender,kafkaDebugAppender +logger.kafka.producer.async.DefaultEventHandler.kafkaInfoAppender.ref=KAFKA_INFO_APPENDER +logger.kafka.producer.async.DefaultEventHandler.kafkaDebugAppender.ref=KAFKA_DEBUG_APPENDER + +logger.kafka.perf.ProducerPerformance$ProducerThread.name=kafka.perf.ProducerPerformance$ProducerThread +logger.kafka.perf.ProducerPerformance$ProducerThread.level={{ log_level|default("DEBUG") }} +logger.kafka.perf.ProducerPerformance$ProducerThread.appenderRefs=kafkaInfoAppender,kafkaDebugAppender +logger.kafka.perf.ProducerPerformance$ProducerThread.kafkaInfoAppender.ref=KAFKA_INFO_APPENDER +logger.kafka.perf.ProducerPerformance$ProducerThread.kafkaDebugAppender.ref=KAFKA_DEBUG_APPENDER + 
+logger.kafka.perf.name=kafka.perf +logger.kafka.perf.level={{ log_level|default("DEBUG") }} +logger.kafka.perf.appenderRefs=kafkaInfoAppender,kafkaDebugAppender +logger.kafka.perf.kafkaInfoAppender.ref=KAFKA_INFO_APPENDER +logger.kafka.perf.kafkaDebugAppender.ref=KAFKA_DEBUG_APPENDER + +logger.kafka.network.RequestChannel$.name=kafka.network.RequestChannel$ +logger.kafka.network.RequestChannel$.level={{ log_level|default("DEBUG") }} +logger.kafka.network.RequestChannel$.additivity=false +logger.kafka.network.RequestChannel$.appenderRefs=requestInfoAppender,requestDebugAppender +logger.kafka.network.RequestChannel$.requestInfoAppender.ref=REQUEST_INFO_APPENDER +logger.kafka.network.RequestChannel$.requestDebugAppender.ref=REQUEST_DEBUG_APPENDER + +logger.kafka.network.Processor.name=kafka.network.Processor +logger.kafka.network.Processor.level={{ log_level|default("DEBUG") }} +logger.kafka.network.Processor.appenderRefs=requestInfoAppender,requestDebugAppender +logger.kafka.network.Processor.requestInfoAppender.ref=REQUEST_INFO_APPENDER +logger.kafka.network.Processor.requestDebugAppender.ref=REQUEST_DEBUG_APPENDER + +logger.kafka.log.LogCleaner.name=kafka.log.LogCleaner +logger.kafka.log.LogCleaner.level={{ log_level|default("DEBUG") }} +logger.kafka.log.LogCleaner.additivity=false +logger.kafka.log.LogCleaner.appenderRefs=cleanerInfoAppender,cleanerDebugAppender +logger.kafka.log.LogCleaner.cleanerInfoAppender.ref=CLEANER_INFO_APPENDER +logger.kafka.log.LogCleaner.cleanerDebugAppender.ref=CLEANER_DEBUG_APPENDER + +logger.kafka.controller.name=kafka.controller +logger.kafka.controller.level={{ log_level|default("DEBUG") }} +logger.kafka.controller.additivity=false +logger.kafka.controller.appenderRefs=controllerInfoAppender,controllerDebugAppender +logger.kafka.controller.controllerInfoAppender.ref=CONTROLLER_INFO_APPENDER +logger.kafka.controller.controllerDebugAppender.ref=CONTROLLER_DEBUG_APPENDER + +logger.kafka.client.ClientUtils.name=kafka.client.ClientUtils +logger.kafka.client.ClientUtils.level={{ log_level|default("DEBUG") }} +logger.kafka.client.ClientUtils.appenderRefs=kafkaInfoAppender,kafkaDebugAppender +logger.kafka.client.ClientUtils.kafkaInfoAppender.ref=KAFKA_INFO_APPENDER +logger.kafka.client.ClientUtils.kafkaDebugAppender.ref=KAFKA_DEBUG_APPENDER + +# Change this to debug to get the actual audit log for authorizer. 
+logger.kafka.authorizer.logger.name=kafka.authorizer.logger +logger.kafka.authorizer.logger.level={{ log_level|default("DEBUG") }} +logger.kafka.authorizer.logger.additivity=false +logger.kafka.authorizer.logger.appenderRefs=authorizerInfoAppender,authorizerDebugAppender +logger.kafka.authorizer.logger.authorizerInfoAppender.ref=AUTHORIZER_INFO_APPENDER +logger.kafka.authorizer.logger.authorizerDebugAppender.ref=AUTHORIZER_DEBUG_APPENDER + +logger.kafka.name=kafka +logger.kafka.level={{ log_level|default("DEBUG") }} +logger.kafka.appenderRefs=kafkaInfoAppender,kafkaDebugAppender +logger.kafka.kafkaInfoAppender.ref=KAFKA_INFO_APPENDER +logger.kafka.kafkaDebugAppender.ref=KAFKA_DEBUG_APPENDER diff --git a/tests/kafkatest/services/streams.py b/tests/kafkatest/services/streams.py index 5dedc57916331..beb0c66adb450 100644 --- a/tests/kafkatest/services/streams.py +++ b/tests/kafkatest/services/streams.py @@ -306,7 +306,7 @@ def start_node(self, node): node.account.mkdirs(self.PERSISTENT_ROOT) prop_file = self.prop_file() node.account.create_file(self.CONFIG_FILE, prop_file) - node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE)) + node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j2.properties', log_file=self.LOG_FILE)) self.logger.info("Starting StreamsTest process on " + str(node.account)) with node.account.monitor_log(self.STDOUT_FILE) as monitor: @@ -368,7 +368,7 @@ def start_cmd(self, node): args['version'] = self.KAFKA_STREAMS_VERSION args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node) - cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\";" \ + cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:%(log4j)s\";" \ " INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s" \ " %(kafka_run_class)s %(streams_class_name)s" \ " %(config_file)s %(user_test_args1)s" \ diff --git a/tests/kafkatest/services/templates/connect_log4j2.properties b/tests/kafkatest/services/templates/connect_log4j2.properties new file mode 100644 index 0000000000000..4867aebabd483 --- /dev/null +++ b/tests/kafkatest/services/templates/connect_log4j2.properties @@ -0,0 +1,39 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
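+#
+# This Jinja2 template is rendered by the ducktape Connect services (see connect.py in this
+# patch, which calls self.render('connect_log4j2.properties', log_file=self.LOG_FILE)), so
+# {{ log_file }} and {{ log_level }} (defaulting to INFO) are substituted at render time.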
+name=TestConfig + +appenders=file + +appender.file.type=File +appender.file.name=FILE +appender.file.fileName={{ log_file }} +appender.file.append=true +appender.file.immediateFlush=true +appender.file.layout.type=PatternLayout +appender.file.layout.pattern=[%d] %p %m (%c)%n + +# Define the root logger with appender file +rootLogger.level={{ log_level|default("INFO") }} +rootLogger.appenderRefs=file +rootLogger.appenderRef.file.ref=FILE + +loggers=org.apache.zookeeper,org.reflections + +logger.org.apache.zookeeper.name=org.apache.zookeeper +logger.org.apache.zookeeper.level=ERROR + +logger.org.reflections.name=org.reflections +logger.org.reflections.level=ERROR diff --git a/tests/kafkatest/tests/streams/templates/log4j_template.properties b/tests/kafkatest/services/templates/tools_log4j2.properties similarity index 64% rename from tests/kafkatest/tests/streams/templates/log4j_template.properties rename to tests/kafkatest/services/templates/tools_log4j2.properties index 3f83b4220a1f5..36436e8b56438 100644 --- a/tests/kafkatest/tests/streams/templates/log4j_template.properties +++ b/tests/kafkatest/services/templates/tools_log4j2.properties @@ -1,3 +1,4 @@ +## # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. @@ -12,20 +13,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +name=TestConfig + +appenders=file + +appender.file.type=File +appender.file.name=FILE +appender.file.fileName={{ log_file }} +appender.file.append=true +appender.file.immediateFlush=true +appender.file.layout.type=PatternLayout +appender.file.layout.pattern=[%d] %p %m (%c)%n # Define the root logger with appender file -log4j.rootLogger = {{ log_level|default("INFO") }}, FILE +rootLogger.level={{ log_level|default("INFO") }} +rootLogger.appenderRefs=file +rootLogger.appenderRef.file.ref=FILE {% if loggers is defined %} +loggers={{ loggers|sort(reverse=true)|join(',') }} + {% for logger, log_level in loggers.items() %} -log4j.logger.{{ logger }}={{ log_level }} +logger.{{ logger }}.name={{ logger }} +logger.{{ logger }}.level={{ log_level }} {% endfor %} {% endif %} - -log4j.appender.FILE=org.apache.log4j.FileAppender -log4j.appender.FILE.File={{ log_file }} -log4j.appender.FILE.ImmediateFlush=true -# Set the append to true -log4j.appender.FILE.Append=true -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.conversionPattern=[%d] %p %m (%c)%n diff --git a/tests/kafkatest/services/trogdor/templates/log4j.properties b/tests/kafkatest/services/trogdor/templates/log4j.properties deleted file mode 100644 index 252668e3dabf8..0000000000000 --- a/tests/kafkatest/services/trogdor/templates/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger=DEBUG, mylogger -log4j.logger.kafka=DEBUG -log4j.logger.org.apache.kafka=DEBUG -log4j.logger.org.eclipse=INFO -log4j.appender.mylogger=org.apache.log4j.FileAppender -log4j.appender.mylogger.File={{ log_path }} -log4j.appender.mylogger.layout=org.apache.log4j.PatternLayout -log4j.appender.mylogger.layout.ConversionPattern=[%d] %p %m (%c)%n diff --git a/tests/kafkatest/services/trogdor/templates/log4j2.properties b/tests/kafkatest/services/trogdor/templates/log4j2.properties new file mode 100644 index 0000000000000..32079002b14ea --- /dev/null +++ b/tests/kafkatest/services/trogdor/templates/log4j2.properties @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=TestConfig + +appenders=file + +appender.file.type=File +appender.file.name=FILE +appender.file.fileName={{ log_path }} +appender.file.layout.type=PatternLayout +appender.file.layout.pattern=[%d] %p %m (%c)%n + +# Define the root logger with appender file +rootLogger.level=DEBUG +rootLogger.appenderRefs=file +rootLogger.appenderRef.file.ref=FILE + +loggers=kafka,org.apache.kafka,org.eclipse + +logger.kafka.name=kafka +logger.kafka.level=DEBUG + +logger.org.apache.kafka.name=org.apache.kafka +logger.org.apache.kafka.level=DEBUG + +logger.org.eclipse.name=org.eclipse +logger.org.eclipse.level=INFO diff --git a/tests/kafkatest/services/trogdor/trogdor.py b/tests/kafkatest/services/trogdor/trogdor.py index bd18bddb6268e..a42775bb75c2f 100644 --- a/tests/kafkatest/services/trogdor/trogdor.py +++ b/tests/kafkatest/services/trogdor/trogdor.py @@ -34,8 +34,8 @@ class TrogdorService(KafkaPathResolverMixin, Service): AGENT_STDOUT_STDERR The path where we store the agents's stdout/stderr output. COORDINATOR_LOG The path where we store the coordinator's log4j output. AGENT_LOG The path where we store the agent's log4j output. - AGENT_LOG4J_PROPERTIES The path to the agent log4j.properties file for log config. - COORDINATOR_LOG4J_PROPERTIES The path to the coordinator log4j.properties file for log config. + AGENT_LOG4J2_PROPERTIES The path to the agent log4j2.properties file for log config. + COORDINATOR_LOG4J2_PROPERTIES The path to the coordinator log4j2.properties file for log config. CONFIG_PATH The path to the trogdor configuration file. DEFAULT_AGENT_PORT The default port to use for trogdor_agent daemons. 
DEFAULT_COORDINATOR_PORT The default port to use for trogdor_coordinator daemons. @@ -48,8 +48,8 @@ class TrogdorService(KafkaPathResolverMixin, Service): AGENT_STDOUT_STDERR = os.path.join(PERSISTENT_ROOT, "trogdor-agent-stdout-stderr.log") COORDINATOR_LOG = os.path.join(PERSISTENT_ROOT, "trogdor-coordinator.log") AGENT_LOG = os.path.join(PERSISTENT_ROOT, "trogdor-agent.log") - COORDINATOR_LOG4J_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-coordinator-log4j.properties") - AGENT_LOG4J_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-agent-log4j.properties") + COORDINATOR_LOG4J2_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-coordinator-log4j2.properties") + AGENT_LOG4J2_PROPERTIES = os.path.join(PERSISTENT_ROOT, "trogdor-agent-log4j2.properties") CONFIG_PATH = os.path.join(PERSISTENT_ROOT, "trogdor.conf") DEFAULT_AGENT_PORT=8888 DEFAULT_COORDINATOR_PORT=8889 @@ -141,26 +141,26 @@ def start_node(self, node): self._start_agent_node(node) def _start_coordinator_node(self, node): - node.account.create_file(TrogdorService.COORDINATOR_LOG4J_PROPERTIES, - self.render('log4j.properties', + node.account.create_file(TrogdorService.COORDINATOR_LOG4J2_PROPERTIES, + self.render('log4j2.properties', log_path=TrogdorService.COORDINATOR_LOG)) self._start_trogdor_daemon("coordinator", TrogdorService.COORDINATOR_STDOUT_STDERR, - TrogdorService.COORDINATOR_LOG4J_PROPERTIES, + TrogdorService.COORDINATOR_LOG4J2_PROPERTIES, TrogdorService.COORDINATOR_LOG, node) self.logger.info("Started trogdor coordinator on %s." % node.name) def _start_agent_node(self, node): - node.account.create_file(TrogdorService.AGENT_LOG4J_PROPERTIES, - self.render('log4j.properties', + node.account.create_file(TrogdorService.AGENT_LOG4J2_PROPERTIES, + self.render('log4j2.properties', log_path=TrogdorService.AGENT_LOG)) self._start_trogdor_daemon("agent", TrogdorService.AGENT_STDOUT_STDERR, - TrogdorService.AGENT_LOG4J_PROPERTIES, + TrogdorService.AGENT_LOG4J2_PROPERTIES, TrogdorService.AGENT_LOG, node) self.logger.info("Started trogdor agent on %s." % node.name) def _start_trogdor_daemon(self, daemon_name, stdout_stderr_capture_path, - log4j_properties_path, log_path, node): - cmd = "export KAFKA_LOG4J_OPTS='-Dlog4j.configuration=file:%s'; " % log4j_properties_path + log4j2_properties_path, log_path, node): + cmd = "export KAFKA_LOG4J_OPTS='-Dlog4j.configurationFile=file:%s'; " % log4j2_properties_path cmd += "%s %s --%s.config %s --node-name %s 1>> %s 2>> %s &" % \ (self.path.script("trogdor.sh", node), daemon_name, diff --git a/tests/kafkatest/tests/streams/streams_relational_smoke_test.py b/tests/kafkatest/tests/streams/streams_relational_smoke_test.py index fe10a29369e91..836bba8ba8fa7 100644 --- a/tests/kafkatest/tests/streams/streams_relational_smoke_test.py +++ b/tests/kafkatest/tests/streams/streams_relational_smoke_test.py @@ -32,10 +32,10 @@ def __init__(self, test_context, kafka, mode, nodeId, processing_guarantee): self.mode = mode self.nodeId = nodeId self.processing_guarantee = processing_guarantee - self.log4j_template = 'log4j_template.properties' + self.log4j_template = 'log4j2_template.properties' def start_cmd(self, node): - return "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \ + return "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configurationFile=file:%(log4j)s\"; " \ "INCLUDE_TEST_JARS=true %(kafka_run_class)s org.apache.kafka.streams.tests.RelationalSmokeTest " \ " %(mode)s %(kafka)s %(nodeId)s %(processing_guarantee)s %(state_dir)s" \ " & echo $! 
>&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % { @@ -54,7 +54,7 @@ def start_cmd(self, node): def start_node(self, node): node.account.mkdirs(self.PERSISTENT_ROOT) node.account.create_file(self.LOG4J_CONFIG_FILE, - self.render("log4j_template.properties", log_file=self.LOG_FILE)) + self.render("log4j2_template.properties", log_file=self.LOG_FILE)) self.logger.info("Starting process on " + str(node.account)) node.account.ssh(self.start_cmd(node)) diff --git a/tests/kafkatest/tests/streams/templates/log4j2_template.properties b/tests/kafkatest/tests/streams/templates/log4j2_template.properties new file mode 100644 index 0000000000000..6b30fdc84a469 --- /dev/null +++ b/tests/kafkatest/tests/streams/templates/log4j2_template.properties @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=TestConfig + +appenders=file + +appender.file.type=File +appender.file.name=FILE +appender.file.fileName={{ log_file }} +# Set the append to true +appender.file.append=true +appender.file.immediateFlush=true +appender.file.layout.type=PatternLayout +appender.file.layout.pattern=[%d] %p %m (%c)%n + +# Define the root logger with appender file +rootLogger.level={{ log_level | default("INFO") }} +rootLogger.appenderRefs=file +rootLogger.appenderRef.file.ref=FILE + +{% if loggers is defined %} +loggers={{ loggers | sort(reverse=true) | join(',') }} + +{% for logger, log_level in loggers.items() %} +logger.{{ logger }}.name={{ logger }} +logger.{{ logger }}.level={{ log_level }} +{% endfor %} +{% endif %} From 2db1e28f0a148b3c2cf01ec1a40bfe3793469df6 Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Thu, 27 Jan 2022 20:12:29 +0900 Subject: [PATCH 2/9] Improve documentation on Level#toLevel semantics --- .../main/scala/kafka/utils/Log4jController.scala | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/core/src/main/scala/kafka/utils/Log4jController.scala b/core/src/main/scala/kafka/utils/Log4jController.scala index af18a67f5f9a1..6d64757f56029 100755 --- a/core/src/main/scala/kafka/utils/Log4jController.scala +++ b/core/src/main/scala/kafka/utils/Log4jController.scala @@ -43,9 +43,9 @@ object Log4jController { */ /** - * Returns a map of the log4j loggers and their assigned log level. - * If a logger does not have a log level assigned, we return the log level of the first ancestor with a level configured.
+ */ def loggers: Map[String, String] = { val logContext = LogManager.getContext(false).asInstanceOf[LoggerContext] val rootLoggerLevel = logContext.getRootLogger.getLevel.toString @@ -69,10 +69,13 @@ object Log4jController { } /** - * Sets the log level of a particular logger - */ + * Sets the log level of a particular logger. If the given logLevel is not an available log4j level + * (i.e., one of OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL) it falls back to DEBUG. + * + * @see [[Level.toLevel]] + */ def logLevel(loggerName: String, logLevel: String): Boolean = { - val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT), null) + val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT)) if (loggerName == ROOT_LOGGER) { Configurator.setAllLevels(LogManager.ROOT_LOGGER_NAME, level) From cb6e861ab1225fb5b81878359af9eddfdd534f08 Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Thu, 27 Jan 2022 20:14:13 +0900 Subject: [PATCH 3/9] Make LogCaptureContext randomly-generated --- .../unit/kafka/admin/AclCommandTest.scala | 4 +--- .../ControllerIntegrationTest.scala | 14 +++++------- .../unit/kafka/utils/LogCaptureContext.scala | 7 +++--- .../kafka/streams/KafkaStreamsTest.java | 6 ++--- .../kafka/streams/StreamsConfigTest.java | 6 +---- .../AdjustStreamThreadCountTest.java | 12 ++++------ .../internals/KGroupedStreamImplTest.java | 4 ++-- .../internals/KStreamKStreamJoinTest.java | 3 +-- .../internals/KStreamKTableJoinTest.java | 6 ++--- ...amSessionWindowAggregateProcessorTest.java | 11 +++------- .../KStreamSlidingWindowAggregateTest.java | 4 ++-- .../internals/KStreamWindowAggregateTest.java | 9 +++----- .../internals/KTableKTableInnerJoinTest.java | 3 +-- .../internals/KTableKTableLeftJoinTest.java | 3 +-- .../internals/KTableKTableOuterJoinTest.java | 3 +-- .../internals/KTableKTableRightJoinTest.java | 4 +--- .../kstream/internals/KTableSourceTest.java | 8 ++----- .../internals/GlobalStateManagerImplTest.java | 3 +-- .../internals/InternalTopicManagerTest.java | 3 +-- .../internals/PartitionGroupTest.java | 21 ++++++------------ .../internals/ProcessorStateManagerTest.java | 3 +-- .../internals/RecordCollectorTest.java | 3 +-- .../internals/StateDirectoryTest.java | 22 ++++++------------- .../internals/StoreChangelogReaderTest.java | 3 +-- .../processor/internals/StreamThreadTest.java | 6 ++--- .../processor/internals/TaskManagerTest.java | 5 ++--- ...lSchemaRocksDBSegmentedBytesStoreTest.java | 3 +-- .../internals/AbstractKeyValueStoreTest.java | 12 ++++------ ...bstractRocksDBSegmentedBytesStoreTest.java | 3 +-- .../AbstractSessionBytesStoreTest.java | 6 ++--- .../AbstractWindowBytesStoreTest.java | 6 ++--- .../CachingInMemorySessionStoreTest.java | 6 ++--- .../CachingPersistentSessionStoreTest.java | 6 ++--- .../CachingPersistentWindowStoreTest.java | 6 ++--- ...OptionsColumnFamilyOptionsAdapterTest.java | 6 ++--- .../RocksDBTimestampedStoreTest.java | 9 +++----- .../apache/kafka/test/LogCaptureContext.java | 14 ++++++------ 37 files changed, 88 insertions(+), 165 deletions(-) diff --git a/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala b/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala index 85e59877a4f69..be5c629897c73 100644 --- a/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AclCommandTest.scala @@ -202,9 +202,7 @@ class AclCommandTest extends QuorumTestHarness with Logging { createServer(Some(adminClientConfig)) - val logCaptureContext = LogCaptureContext( - classOf[AppInfoParser].getName, 
Map(classOf[AppInfoParser].getName -> "WARN") - ) + val logCaptureContext = LogCaptureContext(Map(classOf[AppInfoParser].getName -> "WARN")) try { testAclCli(adminArgs) val warning = logCaptureContext.getMessages.find(e => e.getLevel == Level.WARN && diff --git a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala index 21607b8811c17..929e0995f43c9 100644 --- a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala @@ -590,7 +590,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { testControllerMove(() => { val adminZkClient = new AdminZkClient(zkClient) adminZkClient.createTopicWithAssignment(tp.topic, config = new Properties(), assignment) - }, s"classOf[ControllerIntegrationTest]#testControllerMoveOnTopicCreation") + }) } @Test @@ -604,7 +604,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { testControllerMove(() => { val adminZkClient = new AdminZkClient(zkClient) adminZkClient.deleteTopic(tp.topic()) - }, s"classOf[ControllerIntegrationTest]#testControllerMoveOnTopicDeletion") + }) } @Test @@ -615,8 +615,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { TestUtils.createTopic(zkClient, tp.topic(), assignment, servers) testControllerMove( - () => zkClient.createPreferredReplicaElection(Set(tp)), - s"classOf[ControllerIntegrationTest]#testControllerMoveOnPreferredReplicaElection") + () => zkClient.createPreferredReplicaElection(Set(tp))) } @Test @@ -629,8 +628,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { val reassignment = Map(tp -> Seq(0)) testControllerMove( - () => zkClient.createPartitionReassignment(reassignment), - s"classOf[ControllerIntegrationTest]#testControllerMoveOnPartitionReassignment") + () => zkClient.createPartitionReassignment(reassignment)) } @Test @@ -1528,9 +1526,9 @@ class ControllerIntegrationTest extends QuorumTestHarness { assertTrue(servers.head.kafkaController.controllerContext.topicNames.get(topicIdAfterUpgrade.get).isEmpty) } - private def testControllerMove(fun: () => Unit, contextName: String): Unit = { + private def testControllerMove(fun: () => Unit): Unit = { val controller = getController().kafkaController - val logCaptureContext = LogCaptureContext(contextName, scala.Predef.Map(classOf[KafkaController].getName -> "INFO")) + val logCaptureContext = LogCaptureContext(scala.Predef.Map(classOf[KafkaController].getName -> "INFO")) logCaptureContext.setLatch(1) try { diff --git a/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala index f66588ea3e3a4..144023ace0718 100644 --- a/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala +++ b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala @@ -16,8 +16,9 @@ */ package unit.kafka.utils -import java.util.concurrent.{CountDownLatch, TimeUnit} +import org.apache.kafka.test.TestUtils +import java.util.concurrent.{CountDownLatch, TimeUnit} import org.apache.logging.log4j.Level import org.apache.logging.log4j.core.{LogEvent, LoggerContext} import org.apache.logging.log4j.test.appender.ListAppender @@ -52,9 +53,9 @@ class LogCaptureContext(listAppender: ListAppender, prevLevelMap: Map[String, Le } object LogCaptureContext { - def apply(name: String, levelMap: Map[String, String] = Map()): LogCaptureContext = { + def apply(levelMap: Map[String, String] = Map()): 
LogCaptureContext = { val loggerContext = LoggerContext.getContext(false) - val listAppender = ListAppender.createAppender(name, + val listAppender = ListAppender.createAppender("logger-context-" + TestUtils.randomString(8), false, false, null, null) listAppender.start loggerContext.getConfiguration.addAppender(listAppender) diff --git a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java index da141fa17ed37..926c6fe17a310 100644 --- a/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/KafkaStreamsTest.java @@ -476,8 +476,7 @@ public void shouldCleanupResourcesOnCloseWithoutPreviousStart() throws Exception final StreamsBuilder builder = getBuilderWithSource(); builder.globalTable("anyTopic"); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldCleanupResourcesOnCloseWithoutPreviousStart"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) { streams.close(); @@ -537,8 +536,7 @@ public void testStateGlobalThreadClose() throws Exception { final StreamsBuilder builder = getBuilderWithSource(); builder.globalTable("anyTopic"); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#testStateGlobalThreadClose"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) { streams.start(); waitForCondition( diff --git a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java index c4998ba98ec2f..62cc2efc6ce5b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/StreamsConfigTest.java @@ -1064,7 +1064,6 @@ public void shouldThrowConfigExceptionWhenStoreTypeConfigNotValueInRange() { @Test public void shouldLogWarningWhenEosAlphaIsUsed() throws InterruptedException { try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldLogWarningWhenEosAlphaIsUsed", Collections.singletonMap(StreamsConfig.class.getName(), "DEBUG") )) { logCaptureContext.setLatch(3); @@ -1088,7 +1087,6 @@ public void shouldLogWarningWhenEosAlphaIsUsed() throws InterruptedException { @Test public void shouldLogWarningWhenEosBetaIsUsed() throws InterruptedException { try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldLogWarningWhenEosBetaIsUsed", Collections.singletonMap(StreamsConfig.class.getName(), "DEBUG") )) { logCaptureContext.setLatch(3); @@ -1109,10 +1107,8 @@ public void shouldLogWarningWhenEosBetaIsUsed() throws InterruptedException { @SuppressWarnings("deprecation") @Test public void shouldLogWarningWhenRetriesIsUsed() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogWarningWhenRetriesIsUsed")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(1); - props.put(StreamsConfig.RETRIES_CONFIG, 0); new StreamsConfig(props); diff --git 
a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java index 747afaeeb3221..239adbbf28d7d 100644 --- a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java @@ -379,8 +379,7 @@ public void shouldResizeCacheAfterThreadRemovalTimesOut() throws InterruptedExce props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); props.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, totalCacheBytes); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldResizeCacheAfterThreadRemovalTimesOut"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { logCaptureContext.setLatch(20); @@ -405,8 +404,7 @@ public void shouldResizeMaxBufferAfterThreadRemovalTimesOut() throws Interrupted props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2); props.put(StreamsConfig.INPUT_BUFFER_MAX_BYTES_CONFIG, maxBufferBytes); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldResizeMaxBufferAfterThreadRemovalTimesOut"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { logCaptureContext.setLatch(20); addStreamStateChangeListener(kafkaStreams); @@ -458,8 +456,7 @@ public void close() { } }); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldResizeCacheAfterThreadReplacement"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { logCaptureContext.setLatch(20); @@ -515,8 +512,7 @@ public void close() { } }); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldResizeMaxBufferAfterThreadReplacement"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), props)) { logCaptureContext.setLatch(20); addStreamStateChangeListener(kafkaStreams); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java index 39fdb37a868f5..6964831fb6ec7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KGroupedStreamImplTest.java @@ -582,7 +582,7 @@ public void shouldLogAndMeasureSkipsInAggregate() { groupedStream.count(Materialized.>as("count").withKeySerde(Serdes.String())); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureSkipsInAggregate"); + LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); @@ -632,7 +632,7 @@ public void shouldLogAndMeasureSkipsInReduce() { ); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureSkipsInReduce"); + 
LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index f18198d72adcd..1064aeb5caa69 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -89,8 +89,7 @@ public void shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVers props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeterOnSkippedRecordsWithNullValueWithBuiltInMetricsVersionLatest")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(6); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java index 1e12b006dbc8d..e4320fff368dd 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java @@ -228,8 +228,7 @@ public void shouldClearTableEntryOnNullValueUpdates() { @Test public void shouldLogAndMeterWhenSkippingNullLeftKey() { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeterWhenSkippingNullLeftKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { final TestInputTopic inputTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer()); inputTopic.pipeInput(null, "A"); @@ -244,8 +243,7 @@ public void shouldLogAndMeterWhenSkippingNullLeftKey() { @Test public void shouldLogAndMeterWhenSkippingNullLeftValue() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeterWhenSkippingNullLeftValue")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(5); final TestInputTopic inputTopic = driver.createInputTopic(streamTopic, new IntegerSerializer(), new StringSerializer()); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java index fd173f8ec73e5..1c8e38bb94804 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSessionWindowAggregateProcessorTest.java @@ -371,8 +371,7 @@ public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() { new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()) ); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics")) { + try (final 
LogCaptureContext logCaptureContext = LogCaptureContext.create()) { processor.process(new Record<>(null, "1", 0L)); @@ -412,9 +411,7 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace() { context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 11L)); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace")) { - + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { // record is late context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("Late1", "1", 0L)); @@ -465,9 +462,7 @@ public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace() { ).get(); processor.init(context); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace")) { - + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { // dummy record to establish stream time = 0 context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders())); processor.process(new Record<>("dummy", "dummy", 0L)); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java index 6269bba4b2816..0f092e585584c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java @@ -1012,7 +1012,7 @@ public void shouldLogAndMeterWhenSkippingNullKey() throws InterruptedException { .aggregate(MockInitializer.STRING_INIT, MockAggregator.toStringInstance("+"), Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String())); props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, builtInMetricsVersion); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKey"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { logCaptureContext.setLatch(1); final TestInputTopic inputTopic = @@ -1046,7 +1046,7 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() throws Interrupt props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, builtInMetricsVersion); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKey"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { logCaptureContext.setLatch(1); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java index d2f9f58d56d5f..e8d21aa1e5bb5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java @@ -278,8 +278,7 
@@ public void shouldLogAndMeterWhenSkippingNullKey() { Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String()) ); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingNullKey"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -311,8 +310,7 @@ public void shouldLogAndMeterWhenSkippingExpiredWindow() { .map((key, value) -> new KeyValue<>(key.toString(), value)) .to("output"); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingExpiredWindow"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -371,8 +369,7 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { .map((key, value) -> new KeyValue<>(key.toString(), value)) .to("output"); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterWhenSkippingExpiredWindowByGrace"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java index 3154fa665bd34..e7a3a1026fcee 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java @@ -261,8 +261,7 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java index 61c9fbcc4bb75..5522a4e4ef603 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java @@ -527,8 +527,7 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java index 
d71bf8e65acee..f6d0708c0d552 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java @@ -418,8 +418,7 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() { context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeterSkippedRecordsDueToNullLeftKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java index 5efba9ea1f993..c7b6622d9db69 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableRightJoinTest.java @@ -52,9 +52,7 @@ public void shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVer context.setRecordMetadata("left", -1, -2); join.init(context); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + - "#shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { join.process(new Record<>(null, new Change<>("new", "old"), 0)); assertThat( diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java index d48c931e821d7..9464a47095ce5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableSourceTest.java @@ -135,9 +135,7 @@ public void kTableShouldLogAndMeterOnSkippedRecords() { final String topic = "topic"; builder.table(topic, stringConsumed); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#kTableShouldLogAndMeterOnSkippedRecords", - Collections.singletonMap(KTableSource.class.getName(), "WARN")); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(Collections.singletonMap(KTableSource.class.getName(), "WARN")); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = @@ -163,9 +161,7 @@ public void kTableShouldLogOnOutOfOrder() { final String topic = "topic"; builder.table(topic, stringConsumed, Materialized.as("store")); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#kTableShouldLogOnOutOfOrder", - Collections.singletonMap(KTableSource.class.getName(), "WARN")); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(Collections.singletonMap(KTableSource.class.getName(), "WARN")); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java index 
da7059ec9982d..c2e8fce5729ae 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java @@ -170,8 +170,7 @@ public void shouldLogWarningMessageWhenIOExceptionInCheckPoint() throws IOExcept file.createNewFile(); file.setWritable(false); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogWarningMessageWhenIOExceptionInCheckPoint")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); stateManager.checkpoint(); logCaptureContext.await(); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java index 6bfce8316f831..3effede108722 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java @@ -781,8 +781,7 @@ public void shouldExhaustRetriesOnTimeoutExceptionForMakeReady() { @Test public void shouldLogWhenTopicNotFoundAndNotThrowException() throws InterruptedException { try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldLogWhenTopicNotFoundAndNotThrowException", - Collections.singletonMap(InternalTopicManager.class.getName(), "DEBUG"))) { + Collections.singletonMap(InternalTopicManager.class.getName(), "DEBUG"))) { logCaptureContext.setLatch(4); mockAdminClient.addTopic( diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java index 93d39da9c43ff..c95e735edf932 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/PartitionGroupTest.java @@ -571,8 +571,7 @@ public void shouldNeverWaitIfIdlingIsDisabled() { assertThat(group.allPartitionsBufferedLocally(), is(false)); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldNeverWaitIfIdlingIsDisabled", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(true)); assertThat( logCaptureContext.getMessages(), @@ -611,8 +610,7 @@ public void shouldBeReadyIfAllPartitionsAreBuffered() { assertThat(group.allPartitionsBufferedLocally(), is(true)); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldBeReadyIfAllPartitionsAreBuffered", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(true)); assertThat( logCaptureContext.getMessages(), @@ -644,8 +642,7 @@ public void shouldWaitForFetchesWhenMetadataIsIncomplete() { assertThat(group.allPartitionsBufferedLocally(), is(false)); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldWaitForFetchesWhenMetadataIsIncomplete", - 
Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( logCaptureContext.getMessages(), @@ -682,8 +679,7 @@ public void shouldWaitForPollWhenLagIsNonzero() { assertThat(group.allPartitionsBufferedLocally(), is(false)); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldWaitForPollWhenLagIsNonzero", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( logCaptureContext.getMessages(), @@ -715,8 +711,7 @@ public void shouldIdleAsSpecifiedWhenLagIsZero() { assertThat(group.allPartitionsBufferedLocally(), is(false)); try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.1", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(0L), is(false)); assertThat( logCaptureContext.getMessages(), @@ -725,8 +720,7 @@ public void shouldIdleAsSpecifiedWhenLagIsZero() { } try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.2", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(1L), is(true)); assertThat( logCaptureContext.getMessages(), @@ -740,8 +734,7 @@ public void shouldIdleAsSpecifiedWhenLagIsZero() { } try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldIdleAsSpecifiedWhenLagIsZero.3", - Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { + LogCaptureContext.create(Collections.singletonMap(PartitionGroup.class.getName(), "TRACE"))) { assertThat(group.readyToProcess(2L), is(true)); assertThat( logCaptureContext.getMessages(), diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java index f8bc4a605465c..c0872fdf663ab 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java @@ -788,8 +788,7 @@ public void shouldLogAWarningIfCheckpointThrowsAnIOException() throws Interrupte stateMgr.registerStore(persistentStore, persistentStore.stateRestoreCallback, null); stateDirectory.clean(); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldLogAWarningIfCheckpointThrowsAnIOException")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(4); stateMgr.updateChangelogOffsets(singletonMap(persistentStorePartition, 10L)); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java index 9dd8ccfad83d3..833845d8dcee8 
100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java @@ -730,8 +730,7 @@ public void shouldThrowStreamsExceptionOnSubsequentCloseIfFatalEvenWithContinueE @Test public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(1); final RecordCollector collector = new RecordCollectorImpl( diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java index 6165b70c59869..d95194e5b9d36 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java @@ -341,8 +341,7 @@ public void shouldCleanupObsoleteTaskDirectoriesAndDeleteTheDirectoryItself() th assertEquals(1, directory.listAllTaskDirectories().size()); assertEquals(1, directory.listNonEmptyTaskDirectories().size()); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldCleanupObsoleteStateDirectoriesOnlyOnce.0")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { time.sleep(5000); logCaptureContext.setLatch(1); directory.cleanRemovedTasks(0); @@ -498,8 +497,7 @@ public void shouldCleanupAllTaskDirectoriesIncludingGlobalOne() { @Test public void shouldNotCreateBaseDirectory() throws IOException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldNotCreateBaseDirectory")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { initializeStateDirectory(false, false); assertThat(stateDir.exists(), is(false)); assertThat(appDir.exists(), is(false)); @@ -579,8 +577,7 @@ public void shouldNotDeleteAppDirWhenCleanUpIfNotEmpty() throws IOException { final File dummyFile = new File(appDir, "dummy"); assertTrue(dummyFile.createNewFile()); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldNotDeleteAppDirWhenCleanUpIfNotEmpty")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(4); // call StateDirectory#clean directory.clean(); @@ -599,8 +596,7 @@ public void shouldLogManualUserCallMessage() { assertThat(testFile.mkdir(), is(true)); assertThat(directory.directoryForTaskIsEmpty(taskId), is(false)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogManualUserCallMessage")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); directory.clean(); @@ -619,8 +615,7 @@ public void shouldLogStateDirCleanerMessage() { assertThat(testFile.mkdir(), is(true)); assertThat(directory.directoryForTaskIsEmpty(taskId), is(false)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + 
"#shouldLogStateDirCleanerMessage")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final long cleanupDelayMs = 0; time.sleep(5000); @@ -631,8 +626,7 @@ public void shouldLogStateDirCleanerMessage() { @Test public void shouldLogTempDirMessage() { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogTempDirMessage")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(4); new StateDirectory( @@ -769,9 +763,7 @@ public void shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteThePar assertThat(directory.listAllTaskDirectories().size(), is(1)); assertThat(directory.listNonEmptyTaskDirectories().size(), is(1)); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + - "#shouldCleanupObsoleteTaskDirectoriesInNamedTopologiesAndDeleteTheParentDirectories")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); time.sleep(5000); directory.cleanRemovedTasks(0); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java index 7d14e6c1fe16a..c0b1ca0cc17b1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StoreChangelogReaderTest.java @@ -1129,8 +1129,7 @@ public void shouldThrowIfRestoreCallbackThrows() { @Test public void shouldNotThrowOnUnknownRevokedPartition() throws InterruptedException { try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowOnUnknownRevokedPartition", - Collections.singletonMap(StoreChangelogReader.class.getName(), "DEBUG"))) { + Collections.singletonMap(StoreChangelogReader.class.getName(), "DEBUG"))) { logCaptureContext.setLatch(2); changelogReader.unregister(Collections.singletonList(new TopicPartition("unknown", 0))); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java index dd454856dd678..67d31b657e6d5 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamThreadTest.java @@ -2117,8 +2117,7 @@ public void shouldLogAndRecordSkippedMetricForDeserializationException() { new RecordHeaders(), Optional.empty())); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndRecordSkippedMetricForDeserializationException")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { thread.runOnce(); final List strings = logCaptureContext.getMessages(); @@ -2633,8 +2632,7 @@ public void shouldLogAndRecordSkippedRecordsForInvalidTimestamps() { Collections.singletonMap("client-id", thread.getName()) ); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndRecordSkippedRecordsForInvalidTimestamps")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { long offset = -1; addRecord(mockConsumer, 
++offset); addRecord(mockConsumer, ++offset); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java index 1cb26f79fac12..67acd3fe61d28 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java @@ -2762,9 +2762,8 @@ public Collection changelogPartitions() { @Test public void shouldHaveRemainingPartitionsUncleared() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldHaveRemainingPartitionsUncleared", - Collections.singletonMap(TaskManager.class.getName(), "DEBUG"))) { + try (final LogCaptureContext logCaptureContext = + LogCaptureContext.create(Collections.singletonMap(TaskManager.class.getName(), "DEBUG"))) { logCaptureContext.setLatch(4); final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java index b7a801cab3a83..bd83cdf1489da 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractDualSchemaRocksDBSegmentedBytesStoreTest.java @@ -1023,8 +1023,7 @@ public void shouldLogAndMeasureExpiredRecords() throws InterruptedException { context.setSystemTimeMs(time.milliseconds()); bytesStore.init((StateStoreContext) context, bytesStore); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeasureExpiredRecords")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(1); // write a record to advance stream time, with a high enough timestamp diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java index 79c6c4c1dcb42..2c74f487ed826 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractKeyValueStoreTest.java @@ -579,8 +579,7 @@ public void shouldNotThrowConcurrentModificationException() { @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); try (final KeyValueIterator iterator = store.range(-1, 1)) { @@ -599,8 +598,7 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws Inte @Test public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey")) { + try (final 
LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); try (final KeyValueIterator iterator = store.reverseRange(-1, 1)) { @@ -619,8 +617,7 @@ public void shouldNotThrowInvalidReverseRangeExceptionWithNegativeFromKey() thro @Test public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); try (final KeyValueIterator iterator = store.range(2, 1)) { @@ -639,8 +636,7 @@ public void shouldNotThrowInvalidRangeExceptionWithFromLargerThanTo() throws Int @Test public void shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidReverseRangeExceptionWithFromLargerThanTo")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); try (final KeyValueIterator iterator = store.reverseRange(2, 1)) { diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java index 8c68f66c0c94d..c870fba670f24 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java @@ -803,8 +803,7 @@ public void shouldLogAndMeasureExpiredRecords() { context.setSystemTimeMs(time.milliseconds()); bytesStore.init((StateStoreContext) context, bytesStore); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureExpiredRecords")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { // write a record to advance stream time, with a high enough timestamp // that the subsequent record in windows[0] will already be expired. 
bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0)); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java index da7097765f8e3..d72d0fe2b8557 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java @@ -722,8 +722,7 @@ public void shouldLogAndMeasureExpiredRecords() throws InterruptedException { context.setSystemTimeMs(time.milliseconds()); sessionStore.init((StateStoreContext) context, sessionStore); - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldLogAndMeasureExpiredRecords")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired @@ -799,8 +798,7 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws Inte final String keyTo = Serdes.String().deserializer() .deserialize("", Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(1); final KeyValueIterator, Long> iterator = sessionStore.findSessions(keyFrom, keyTo, 0L, 10L); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java index 164a4ab9617e3..0d2893b020c3e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java @@ -968,8 +968,7 @@ public void shouldReturnSameResultsForSingleKeyFetchAndEqualKeyRangeFetch() { @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { - try (final LogCaptureContext logCaptureContext = - LogCaptureContext.create(this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { final KeyValueIterator, String> iterator = windowStore.fetch(-1, 1, 0L, 10L); assertFalse(iterator.hasNext()); @@ -999,8 +998,7 @@ public void shouldLogAndMeasureExpiredRecords() { context.setTime(1L); windowStore.init((StateStoreContext) context, windowStore); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogAndMeasureExpiredRecords")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired windowStore.put(1, "initial record", 2 * RETENTION_PERIOD); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java index 01ffb04736134..70d207e9ca996 100644 --- 
a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingInMemorySessionStoreTest.java @@ -807,8 +807,7 @@ public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final KeyValueIterator, byte[]> iterator = cachingStore.backwardFindSessions(keyFrom, keyTo, 0L, 10L); @@ -830,8 +829,7 @@ public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java index f3384a415aa19..192816a52f061 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentSessionStoreTest.java @@ -818,8 +818,7 @@ public void shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey() final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWhenBackwardWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final KeyValueIterator, byte[]> iterator = @@ -845,8 +844,7 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { final KeyValueIterator, byte[]> iterator = cachingStore.findSessions(keyFrom, keyTo, 0L, 10L); assertFalse(iterator.hasNext()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java index ebbd5be893142..48ce37fe8fc74 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java +++ 
b/streams/src/test/java/org/apache/kafka/streams/state/internals/CachingPersistentWindowStoreTest.java @@ -998,8 +998,7 @@ public void shouldThrowNullPointerExceptionOnFetchNullKey() { @Test public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); @@ -1024,8 +1023,7 @@ public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() thr final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); final KeyValueIterator, byte[]> iterator = diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java index 72afac31f5cc1..6c6cfdfee2e5a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java @@ -330,10 +330,8 @@ public String name() { @Test public void shouldLogWarningWhenSettingWalOptions() throws Exception { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldLogWarningWhenSettingWalOptions", - Collections.singletonMap(RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class.getName(), "WARN") - )) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( + Collections.singletonMap(RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.class.getName(), "WARN"))) { logCaptureContext.setLatch(16); final RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter adapter = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(new DBOptions(), new ColumnFamilyOptions()); diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java index 40a65cc178c63..5230d9935543b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/RocksDBTimestampedStoreTest.java @@ -53,8 +53,7 @@ RocksDBStore getRocksDBStore() { @Test public void shouldOpenNewStoreInRegularMode() throws InterruptedException { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldOpenNewStoreInRegularMode")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(2); rocksDBStore.init((StateStoreContext) context, rocksDBStore); @@ -69,8 
+68,7 @@ public void shouldOpenNewStoreInRegularMode() throws InterruptedException { @Test public void shouldOpenExistingStoreInRegularMode() throws Exception { - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create( - this.getClass().getName() + "#shouldOpenExistingStoreInRegularMode")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(3); // prepare store @@ -132,8 +130,7 @@ public void shouldOpenExistingStoreInRegularMode() throws Exception { public void shouldMigrateDataFromDefaultToTimestampColumnFamily() throws Exception { prepareOldStore(); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldMigrateDataFromDefaultToTimestampColumnFamily")) { + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) { logCaptureContext.setLatch(3); rocksDBStore.init((StateStoreContext) context, rocksDBStore); diff --git a/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java index 6849e4bd50628..872e029126e4e 100644 --- a/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java +++ b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java @@ -32,18 +32,18 @@ public class LogCaptureContext implements AutoCloseable { private final ListAppender listAppender; private final Map prevLevelMap = new HashMap<>(); - public static LogCaptureContext create(final String name) { - return create(name, new HashMap<>()); + public static LogCaptureContext create() { + return create(new HashMap<>()); } - public static LogCaptureContext create(final String name, final Map levelMap) { - return new LogCaptureContext(name, levelMap); + public static LogCaptureContext create(final Map levelMap) { + return new LogCaptureContext(levelMap); } - private LogCaptureContext(final String name, final Map levelMap) { + private LogCaptureContext(final Map levelMap) { final LoggerContext loggerContext = LoggerContext.getContext(false); - listAppender = ListAppender.createAppender(name, false, false, - PatternLayout.newBuilder().withPattern("%p %m %throwable").build(), null); + listAppender = ListAppender.createAppender("logger-context-" + TestUtils.randomString(8), + false, false, PatternLayout.newBuilder().withPattern("%p %m %throwable").build(), null); listAppender.start(); loggerContext.getConfiguration().addAppender(listAppender); loggerContext.getRootLogger().addAppender(listAppender); From 0c38c3ba1a990d9ddda1ec33a0211d601f0a195c Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Fri, 28 Jan 2022 17:56:46 +0900 Subject: [PATCH 4/9] Fix TODO in PlaintextAdminIntegrationTest#testIncrementalAlterConfigsForLog4jLogLevelsDoesNotWorkWithInvalidConfigs; see KIP-817 --- .../kafka/api/PlaintextAdminIntegrationTest.scala | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index 0e1e890859c24..cc7e72b6bd3b4 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -2228,10 +2228,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * However, there are two problems with method 2: * * - Method 1 supports 8 Log4j levels; However, method 2 supports only 6 levels. 
(see [[LogLevelConfig.VALID_LOG_LEVELS]]) - * So, OFF and ALL are only supported in method 1. - * - If the user tries to call APPEND or SUBTRACT with method 2, [[InvalidRequestException]] is thrown; However, if the user - * specifies the wrong logger name or level, it throws [[InvalidConfigurationException]]. (see [[ConfigAdminManager#validateLogLevelConfigs]]) - * Is this consistent? + * So, OFF and ALL are only supported in method 1. Lacking support of OFF level limits the utilization of this functionality. + * - With Method 1, an unsupported log level is regarded as DEBUG; However, with Method 2, an unsupported log level raises + * [[InvalidConfigurationException]]. The documentation does not state their slightly different semantics. */ val invalidLogLevelLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid From 6a1d9011da789d1b893c7bdd364b9bf55b7bf890 Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Sat, 29 Jan 2022 22:42:19 +0900 Subject: [PATCH 5/9] Add logging message for the embedded zookeeper launchers --- bin/windows/zookeeper-server-start.bat | 2 ++ bin/zookeeper-server-start.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/bin/windows/zookeeper-server-start.bat b/bin/windows/zookeeper-server-start.bat index ecdf5fe3710b5..ad42edf1df9da 100644 --- a/bin/windows/zookeeper-server-start.bat +++ b/bin/windows/zookeeper-server-start.bat @@ -19,6 +19,8 @@ IF [%1] EQU [] ( EXIT /B 1 ) +echo Running with log4j 2.x - Log4j MBean registration is not supported. + SetLocal IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( echo DEPRECATED: using log4j 1.x configuration. To use log4j 2.x configuration, run with: 'set KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile=file:%~dp0../../config/log4j2.properties' diff --git a/bin/zookeeper-server-start.sh b/bin/zookeeper-server-start.sh index 3dfbb491dea79..3d45d8b7f85d6 100755 --- a/bin/zookeeper-server-start.sh +++ b/bin/zookeeper-server-start.sh @@ -26,6 +26,8 @@ if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" fi +echo "Running with log4j 2.x - Log4j MBean registration is not supported." 
+ if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M" fi From d8a27c1b99325c77ccf9b98f10a3ea5b6a17cfe4 Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Thu, 17 Mar 2022 22:43:44 +0900 Subject: [PATCH 6/9] =?UTF-8?q?Upgrade=20log4j=20dependency:=202.14.1=20?= =?UTF-8?q?=E2=86=92=202.17.2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- gradle/dependencies.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle index 8201ff5ad9704..c42f22ab54003 100644 --- a/gradle/dependencies.gradle +++ b/gradle/dependencies.gradle @@ -75,7 +75,7 @@ versions += [ jline: "3.21.0", jmh: "1.35", hamcrest: "2.2", - log4j2: "2.14.1", + log4j2: "2.17.2", scalaLogging: "3.9.4", jaxb: "2.3.0", jaxrs: "2.1.1", From 1c6670bcb36105223247ce51d6837a97f27daa4e Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Fri, 18 Mar 2022 19:23:21 +0900 Subject: [PATCH 7/9] Add javadoc to LogCaptureContext --- .../unit/kafka/utils/LogCaptureContext.scala | 50 +++++++++++++++++++ .../apache/kafka/test/LogCaptureContext.java | 41 +++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala index 144023ace0718..6bb0f568286a8 100644 --- a/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala +++ b/core/src/test/scala/unit/kafka/utils/LogCaptureContext.scala @@ -52,6 +52,56 @@ class LogCaptureContext(listAppender: ListAppender, prevLevelMap: Map[String, Le } } +/** + * This class provides an isolated logging context for logging tests. You can also set the logging + * level of the loggers for a given context differently. + * + * By default, the context uses the definition in src/test/resources/log4j2.properties: + * + * {{{ + * // Creates a logging context with default configurations + * val logCaptureContext = LogCaptureContext(Map(classOf[AppInfoParser].getName -> "WARN")) + * try { + * ... + * } finally { + * logCaptureContext.close + * } + * }}} + * + * You can override the default logging levels by passing a map from the logger name to the desired level, like: + * + * {{{ + * // A logging context with default configuration, but 'foo.bar' logger's level is set to WARN. + * val logCaptureContext = LogCaptureContext(Map("foo.bar" -> "WARN")) + * try { + * ... + * } finally { + * logCaptureContext.close + * } + * }}} + * + * Since the logging messages are appended asynchronously, you should wait until the appender process + * the given messages with [[LogCaptureContext.setLatch(Int)]] and [[LogCaptureContext.await(Long, TimeUnit)]] methods, like: + * + * {{{ + * // A logging context with default configuration, but 'foo.bar' logger's level is set to WARN. + * val logCaptureContext = LogCaptureContext() + * try { + * // We expect there will be at least 5 logging messages. + * logCaptureContext.setLatch(5); + * + * // The routine to test ... + * + * // Wait for the appender to finish processing the logging messages, 10 seconds in maximum. + * logCaptureContext.await(10, TimeUnit.SECONDS) + * val event = logCaptureContext.getMessages.find(...) + * } finally { + * logCaptureContext.close + * } + * }}} + * + * Note: The tests may hang up if you set the messages count too high. 
+ */ object LogCaptureContext { def apply(levelMap: Map[String, String] = Map()): LogCaptureContext = { val loggerContext = LoggerContext.getContext(false) diff --git a/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java index 872e029126e4e..05dd0d27065e1 100644 --- a/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java +++ b/streams/src/test/java/org/apache/kafka/test/LogCaptureContext.java @@ -28,6 +28,47 @@ import org.apache.logging.log4j.core.layout.PatternLayout; import org.apache.logging.log4j.test.appender.ListAppender; +/** + *

 This class provides an isolated logging context for logging tests. You can also set the logging + * levels of individual loggers differently for a given context. + * + *

By default, the context uses the definition in src/test/resources/log4j2.properties: + *

+ *     // Creates a logging context with default configurations
+ *     try (final LogCaptureContext logCaptureContext = LogCaptureContext.create()) {
+ *         ...
+ *     }
+ * 
+ * + *

 You can override the default logging levels by passing a map from logger names to the desired levels, like: + *

+ *     // A logging context with default configuration, but 'foo.bar' logger's level is set to WARN.
+ *     try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(
+ *         Collections.singletonMap("foo.bar", "WARN")
+ *     )) {
+ *         ...
+ *     }
+ * 
+ * + *

 Since the logging messages are appended asynchronously, you should wait until the appender processes + * the given messages with the {@link #setLatch(int)} and {@link #await(long, TimeUnit)} methods, like: + *

+ *     try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(...)) {
+ *         // We expect there will be at least 5 logging messages.
+ *         logCaptureContext.setLatch(5);
+ *
+ *         // The routine to test ...
+ *
+ *         // Wait for the appender to finish processing the logging messages, for at most 10 seconds.
+ *         logCaptureContext.await(10L, TimeUnit.SECONDS);
+ *         assertThat(
+ *             logCaptureContext.getMessages(),
+ *             hasItem("the logging message is appended"));
+ *     }
+ * 
+ * + *

Note: The tests may hang up if you set the messages count too high. + */ public class LogCaptureContext implements AutoCloseable { private final ListAppender listAppender; private final Map prevLevelMap = new HashMap<>(); From 137892e1a31916629db0314214ff971e815b920b Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Fri, 18 Mar 2022 19:23:52 +0900 Subject: [PATCH 8/9] Trivial Improvements (thanks to Luke Chen) --- .../connect/runtime/rest/resources/LoggingResource.java | 8 ++------ .../streams/kstream/internals/KStreamKStreamJoinTest.java | 3 --- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java index 6069951765c0e..f3547a86fb2f1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java @@ -99,14 +99,10 @@ public Response getLogger(final @PathParam("logger") String loggerName) { List en = currentLoggers(); Optional found = en.stream().filter(existingLogger -> loggerName.equals(existingLogger.getName())).findAny(); - logger = found.orElse(null); + logger = found.orElseThrow(() -> new NotFoundException("Logger " + loggerName + " not found.")); } - if (logger == null) { - throw new NotFoundException("Logger " + loggerName + " not found."); - } else { - return Response.ok(effectiveLevelToMap(logger)).build(); - } + return Response.ok(effectiveLevelToMap(logger)).build(); } /** diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index 1064aeb5caa69..2047ea1f42f97 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -278,9 +278,6 @@ public void shouldThrowExceptionWhenOtherJoinStoreSetsRetainDuplicatesFalse() { @Test public void shouldBuildJoinWithCustomStoresAndCorrectWindowSettings() { // Case where everything matches up - final WindowBytesStoreSupplier thisStoreSupplier = buildWindowBytesStoreSupplier("in-memory-join-store", 150, 100, true); - final WindowBytesStoreSupplier otherStoreSupplier = buildWindowBytesStoreSupplier("in-memory-join-store-other", 150, 100, true); - final StreamsBuilder builder = new StreamsBuilder(); final KStream left = builder.stream("left", Consumed.with(Serdes.String(), Serdes.Integer())); final KStream right = builder.stream("right", Consumed.with(Serdes.String(), Serdes.Integer())); From b7d7b915234182e84e4e7d1551238aee6d4802c7 Mon Sep 17 00:00:00 2001 From: Lee Dongjin Date: Sat, 23 Apr 2022 20:46:57 +0900 Subject: [PATCH 9/9] Make AdjustStreamThreadCountTest#shouldResizeCacheAfterThreadReplacement not fragile --- .../streams/integration/AdjustStreamThreadCountTest.java | 3 +-- .../TimeOrderedCachingPersistentWindowStoreTest.java | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java index 239adbbf28d7d..0aaf2570300c3 100644 --- 
a/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/integration/AdjustStreamThreadCountTest.java @@ -471,8 +471,7 @@ public void close() { waitForTransitionFromRebalancingToRunning(); logCaptureContext.await(); - // after we replace the thread there should be two remaining threads with 5 bytes each for - // the cache and 50 for the input buffer + // all 10 bytes should be available for remaining thread assertThat(logCaptureContext.getMessages(), hasItems(containsString("Adding StreamThread-3, there are now 2 threads with cache size/max buffer size values as 5/50 per thread. "))); } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java index e0802561b4108..8c91e68d049b6 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedCachingPersistentWindowStoreTest.java @@ -1130,8 +1130,7 @@ public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldNotThrowInvalidRangeExceptionWithNegativeFromKey"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KeyValueIterator, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) { assertFalse(iterator.hasNext()); @@ -1151,8 +1150,7 @@ public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() { final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1)); final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1)); - try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(this.getClass().getName() - + "#shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey"); + try (final LogCaptureContext logCaptureContext = LogCaptureContext.create(); final KeyValueIterator, byte[]> iterator = cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) { assertFalse(iterator.hasNext());
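A minimal sketch of the test pattern these changes converge on, for context: an isolated capture context with an optional per-logger level override, an expected-message latch, an await, and an assertion over the captured messages. The test class name, the 'foo.bar' logger, the latch count, and the expected message below are illustrative assumptions only; the LogCaptureContext API (create, setLatch, await, getMessages) is the one introduced by this series.

    import java.util.Collections;
    import java.util.concurrent.TimeUnit;

    import org.apache.kafka.test.LogCaptureContext;

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.containsString;
    import static org.hamcrest.Matchers.hasItem;

    public class LogCaptureUsageSketch {

        public void captureAndAssert() throws InterruptedException {
            // Isolated capture context; set the (illustrative) 'foo.bar' logger to DEBUG for this block only.
            try (final LogCaptureContext logCaptureContext =
                     LogCaptureContext.create(Collections.singletonMap("foo.bar", "DEBUG"))) {
                // Messages are appended asynchronously, so declare how many are expected ...
                logCaptureContext.setLatch(1);

                // ... run the code under test here ...

                // ... then wait (up to 10 seconds) for the appender to process them before asserting.
                logCaptureContext.await(10L, TimeUnit.SECONDS);
                assertThat(logCaptureContext.getMessages(), hasItem(containsString("expected message")));
            }
        }
    }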