diff --git a/bin/kafka-get-offsets.sh b/bin/kafka-get-offsets.sh index 993a202683309..b9e37b8890b4a 100755 --- a/bin/kafka-get-offsets.sh +++ b/bin/kafka-get-offsets.sh @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -exec $(dirname $0)/kafka-run-class.sh kafka.tools.GetOffsetShell "$@" +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.GetOffsetShell "$@" diff --git a/bin/windows/kafka-get-offsets.bat b/bin/windows/kafka-get-offsets.bat index 08b8e27d70fec..89d16671071cd 100644 --- a/bin/windows/kafka-get-offsets.bat +++ b/bin/windows/kafka-get-offsets.bat @@ -14,4 +14,4 @@ rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. rem See the License for the specific language governing permissions and rem limitations under the License. -"%~dp0kafka-run-class.bat" kafka.tools.GetOffsetShell %* +"%~dp0kafka-run-class.bat" org.apache.kafka.tools.GetOffsetShell %* diff --git a/build.gradle b/build.gradle index b332932c63bf5..20d1328df5935 100644 --- a/build.gradle +++ b/build.gradle @@ -2915,6 +2915,10 @@ project(':connect:file') { testRuntimeOnly libs.slf4jlog4j testImplementation project(':clients').sourceSets.test.output + testImplementation project(':connect:runtime') + testImplementation project(':connect:runtime').sourceSets.test.output + testImplementation project(':core') + testImplementation project(':core').sourceSets.test.output } javadoc { diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 0843deb1a3032..a12ee2ea93e8f 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -290,6 +290,10 @@ + + + + diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java index 4c4d6353eff2f..16c76afe3f72a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClientUtils.java @@ -18,6 +18,7 @@ package org.apache.kafka.clients; import org.apache.kafka.common.Node; +import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.utils.Time; @@ -114,4 +115,21 @@ public static ClientResponse sendAndReceive(KafkaClient client, ClientRequest re } } + + /** + * Check if the code is disconnected and unavailable for immediate reconnection (i.e. if it is in + * reconnect backoff window following the disconnect). + */ + public static boolean isUnavailable(KafkaClient client, Node node, Time time) { + return client.connectionFailed(node) && client.connectionDelay(node, time.milliseconds()) > 0; + } + + /** + * Check for an authentication error on a given node and raise the exception if there is one. + */ + public static void maybeThrowAuthFailure(KafkaClient client, Node node) { + AuthenticationException exception = client.authenticationException(node); + if (exception != null) + throw exception; + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/EndpointType.java b/clients/src/main/java/org/apache/kafka/clients/admin/EndpointType.java new file mode 100644 index 0000000000000..e77d88a013a72 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/admin/EndpointType.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.admin; + +/** + * Identifies the endpoint type, as specified by KIP-919. + */ +public enum EndpointType { + UNKNOWN((byte) 0), + BROKER((byte) 1), + CONTROLLER((byte) 2); + + private final byte id; + + EndpointType(byte id) { + this.id = id; + } + + public byte id() { + return id; + } + + public static EndpointType fromId(byte id) { + if (id == BROKER.id) { + return BROKER; + } else if (id == CONTROLLER.id) { + return CONTROLLER; + } else { + return UNKNOWN; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java index 16af19667194a..960d953b584b3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java @@ -61,7 +61,7 @@ public ListOffsetsHandler( this.offsetTimestampsByPartition = offsetTimestampsByPartition; this.options = options; this.log = logContext.logger(ListOffsetsHandler.class); - this.lookupStrategy = new PartitionLeaderStrategy(logContext); + this.lookupStrategy = new PartitionLeaderStrategy(logContext, false); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java index 18ae79a7b6267..fe8e48e705dc0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategy.java @@ -42,9 +42,15 @@ public class PartitionLeaderStrategy implements AdminApiLookupStrategy partitions) { return new MetadataRequest.Builder(request); } + @SuppressWarnings("fallthrough") private void handleTopicError( String topic, Errors topicError, @@ -72,6 +79,12 @@ private void handleTopicError( ) { switch (topicError) { case UNKNOWN_TOPIC_OR_PARTITION: + if (!tolerateUnknownTopics) { + log.error("Received unknown topic error for topic {}", topic, topicError.exception()); + failAllPartitionsForTopic(topic, requestPartitions, failed, tp -> topicError.exception( + "Failed to fetch metadata for partition " + tp + " because metadata for topic `" + topic + "` could not be found")); + break; + } case LEADER_NOT_AVAILABLE: case BROKER_NOT_AVAILABLE: log.debug("Metadata request for topic {} returned topic-level error {}. Will retry", @@ -124,6 +137,7 @@ private void handlePartitionError( case LEADER_NOT_AVAILABLE: case BROKER_NOT_AVAILABLE: case KAFKA_STORAGE_ERROR: + case UNKNOWN_TOPIC_OR_PARTITION: log.debug("Metadata request for partition {} returned partition-level error {}. 
Will retry", topicPartition, partitionError); break; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 9ab9a3b476307..b98f7989c4142 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -639,9 +639,9 @@ private void maybeOverrideClientId(Map configs) { } } - protected static Map appendDeserializerToConfig(Map configs, - Deserializer keyDeserializer, - Deserializer valueDeserializer) { + public static Map appendDeserializerToConfig(Map configs, + Deserializer keyDeserializer, + Deserializer valueDeserializer) { // validate deserializer configuration, if the passed deserializer instance is null, the user must explicitly set a valid deserializer configuration value Map newConfigs = new HashMap<>(configs); if (keyDeserializer != null) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index a2f2eed4eec65..f029fe74f8932 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -2219,7 +2219,8 @@ public Map endOffsets(Collection partition * for example if there is no position yet, or if the end offset is not known yet. * *
- * This method uses locally cached metadata and never makes a remote call. + * This method uses locally cached metadata. If the log end offset is not known yet, it triggers a request to fetch + * the log end offset, but returns immediately. * * @param topicPartition The partition to get the lag for. * diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java index 5ab804ccb7ae5..8d6390ca94da5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java @@ -274,11 +274,11 @@ public Fetch collectFetch() { try { while (recordsRemaining > 0) { - if (nextInLineFetch == null || nextInLineFetch.isConsumed) { + if (nextInLineFetch == null || nextInLineFetch.isConsumed()) { CompletedFetch records = completedFetches.peek(); if (records == null) break; - if (!records.initialized) { + if (!records.isInitialized()) { try { nextInLineFetch = initializeCompletedFetch(records); } catch (Exception e) { @@ -336,7 +336,7 @@ private Fetch fetchRecords(final int maxRecords) { throw new IllegalStateException("Missing position for fetchable partition " + nextInLineFetch.partition); } - if (nextInLineFetch.nextFetchOffset == position.offset) { + if (nextInLineFetch.nextFetchOffset() == position.offset) { List> partRecords = nextInLineFetch.fetchRecords(maxRecords); log.trace("Returning {} fetched records at offset {} for assigned partition {}", @@ -344,10 +344,10 @@ private Fetch fetchRecords(final int maxRecords) { boolean positionAdvanced = false; - if (nextInLineFetch.nextFetchOffset > position.offset) { + if (nextInLineFetch.nextFetchOffset() > position.offset) { SubscriptionState.FetchPosition nextPosition = new SubscriptionState.FetchPosition( - nextInLineFetch.nextFetchOffset, - nextInLineFetch.lastEpoch, + nextInLineFetch.nextFetchOffset(), + nextInLineFetch.lastEpoch(), position.currentLeader); log.trace("Updating fetch position from {} to {} for partition {} and returning {} records from `poll()`", position, nextPosition, nextInLineFetch.partition, partRecords.size()); @@ -369,7 +369,7 @@ private Fetch fetchRecords(final int maxRecords) { // these records aren't next in line based on the last consumed position, ignore them // they must be from an obsolete request log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", - nextInLineFetch.partition, nextInLineFetch.nextFetchOffset, position); + nextInLineFetch.partition, nextInLineFetch.nextFetchOffset(), position); } } @@ -381,7 +381,7 @@ private Fetch fetchRecords(final int maxRecords) { private List fetchablePartitions() { Set exclude = new HashSet<>(); - if (nextInLineFetch != null && !nextInLineFetch.isConsumed) { + if (nextInLineFetch != null && !nextInLineFetch.isConsumed()) { exclude.add(nextInLineFetch.partition); } for (CompletedFetch completedFetch : completedFetches) { @@ -528,7 +528,7 @@ private CompletedFetch initializeCompletedFetch(final CompletedFetch private CompletedFetch handleInitializeCompletedFetchSuccess(final CompletedFetch completedFetch) { final TopicPartition tp = completedFetch.partition; - final long fetchOffset = completedFetch.nextFetchOffset; + final long fetchOffset = completedFetch.nextFetchOffset(); // we are interested in this fetch only if the beginning offset matches the // current consumed position @@ -586,14 +586,14 @@ private 
CompletedFetch handleInitializeCompletedFetchSuccess(final Complet }); } - completedFetch.initialized = true; + completedFetch.setInitialized(); return completedFetch; } private void handleInitializeCompletedFetchErrors(final CompletedFetch completedFetch, final Errors error) { final TopicPartition tp = completedFetch.partition; - final long fetchOffset = completedFetch.nextFetchOffset; + final long fetchOffset = completedFetch.nextFetchOffset(); if (error == Errors.NOT_LEADER_OR_FOLLOWER || error == Errors.REPLICA_NOT_AVAILABLE || diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index 9d23b9a9c2473..83672fbc080ad 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -53,7 +53,8 @@ public class CommitRequestManager implements RequestManager { // TODO: current in ConsumerConfig but inaccessible in the internal package. private static final String THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED = "internal.throw.on.fetch.stable.offset.unsupported"; // TODO: We will need to refactor the subscriptionState - private final SubscriptionState subscriptionState; + private final SubscriptionState subscriptions; + private final LogContext logContext; private final Logger log; private final Optional autoCommitState; private final CoordinatorRequestManager coordinatorRequestManager; @@ -66,11 +67,12 @@ public class CommitRequestManager implements RequestManager { public CommitRequestManager( final Time time, final LogContext logContext, - final SubscriptionState subscriptionState, + final SubscriptionState subscriptions, final ConsumerConfig config, final CoordinatorRequestManager coordinatorRequestManager, final GroupState groupState) { Objects.requireNonNull(coordinatorRequestManager, "Coordinator is needed upon committing offsets"); + this.logContext = logContext; this.log = logContext.logger(getClass()); this.pendingRequests = new PendingRequests(); if (config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) { @@ -82,7 +84,7 @@ public CommitRequestManager( } this.coordinatorRequestManager = coordinatorRequestManager; this.groupState = groupState; - this.subscriptionState = subscriptionState; + this.subscriptions = subscriptions; this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); this.throwOnFetchStableOffsetUnsupported = config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED); @@ -99,7 +101,7 @@ public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList()); } - maybeAutoCommit(this.subscriptionState.allConsumed()); + maybeAutoCommit(this.subscriptions.allConsumed()); if (!pendingRequests.hasUnsentRequests()) { return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList()); } @@ -167,9 +169,9 @@ CompletableFuture sendAutoCommit(final Map { if (t instanceof RetriableCommitFailedException) { - log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", allConsumedOffsets, t); + log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", allConsumedOffsets, t.getMessage()); } else { - log.warn("Asynchronous auto-commit of 
offsets {} failed: {}", allConsumedOffsets, t.getMessage()); + log.warn("Asynchronous auto-commit of offsets {} failed", allConsumedOffsets, t); } return null; }); @@ -241,7 +243,7 @@ public OffsetFetchRequestState(final Set partitions, final GroupState.Generation generation, final long retryBackoffMs, final long retryBackoffMaxMs) { - super(retryBackoffMs, retryBackoffMaxMs); + super(logContext, CommitRequestManager.class.getSimpleName(), retryBackoffMs, retryBackoffMaxMs); this.requestedPartitions = partitions; this.requestedGeneration = generation; this.future = new CompletableFuture<>(); @@ -366,6 +368,16 @@ private CompletableFuture> chainFuture(fi } }); } + + @Override + public String toString() { + return "OffsetFetchRequestState{" + + "requestedPartitions=" + requestedPartitions + + ", requestedGeneration=" + requestedGeneration + + ", future=" + future + + ", " + toStringBase() + + '}'; + } } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java index 96319b18ff170..26a134c1b8d1f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java @@ -57,17 +57,12 @@ * @param Record key type * @param Record value type */ -class CompletedFetch { +public class CompletedFetch { final TopicPartition partition; final FetchResponseData.PartitionData partitionData; final short requestVersion; - long nextFetchOffset; - Optional lastEpoch; - boolean isConsumed = false; - boolean initialized = false; - private final Logger log; private final SubscriptionState subscriptions; private final FetchConfig fetchConfig; @@ -84,6 +79,10 @@ class CompletedFetch { private CloseableIterator records; private Exception cachedRecordException = null; private boolean corruptLastRecord = false; + private long nextFetchOffset; + private Optional lastEpoch; + private boolean isConsumed = false; + private boolean initialized = false; CompletedFetch(LogContext logContext, SubscriptionState subscriptions, @@ -109,6 +108,27 @@ class CompletedFetch { this.abortedTransactions = abortedTransactions(partitionData); } + long nextFetchOffset() { + return nextFetchOffset; + } + + Optional lastEpoch() { + return lastEpoch; + } + + boolean isInitialized() { + return initialized; + } + + void setInitialized() { + this.initialized = true; + } + + public boolean isConsumed() { + return isConsumed; + } + + /** * After each partition is parsed, we update the current metric totals with the total bytes * and number of records parsed. After all partitions have reported, we write the metric. 
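Aside (illustrative, not part of the patch): the OffsetFetchRequestState.toString() added above composes toStringBase() from its RequestState superclass, which this patch extends further down. A minimal standalone Java sketch of that composition pattern, using hypothetical BaseState/OffsetFetchState classes rather than the real Kafka types:

// Minimal standalone sketch (hypothetical classes, not the real Kafka types) of the
// toString()/toStringBase() composition used above: the base class renders its own fields once,
// and each subclass appends them, so new base fields are never omitted from subclass toString().
public class ToStringBaseSketch {
    static class BaseState {
        protected final String owner;
        protected int numAttempts = 0;

        BaseState(String owner) {
            this.owner = owner;
        }

        protected String toStringBase() {
            return "owner='" + owner + "', numAttempts=" + numAttempts;
        }

        @Override
        public String toString() {
            return "BaseState{" + toStringBase() + '}';
        }
    }

    static class OffsetFetchState extends BaseState {
        private final String requestedPartitions;

        OffsetFetchState(String owner, String requestedPartitions) {
            super(owner);
            this.requestedPartitions = requestedPartitions;
        }

        @Override
        public String toString() {
            return "OffsetFetchState{requestedPartitions=" + requestedPartitions
                + ", " + toStringBase() + '}';
        }
    }

    public static void main(String[] args) {
        System.out.println(new OffsetFetchState("CommitRequestManager", "[topic-0]"));
        // Prints: OffsetFetchState{requestedPartitions=[topic-0], owner='CommitRequestManager', numAttempts=0}
    }
}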
diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java index 9a119a18b331c..97c64f93750cd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.Metadata; +import org.apache.kafka.clients.NetworkClientUtils; import org.apache.kafka.clients.RequestCompletionHandler; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; @@ -558,7 +559,7 @@ public void close() throws IOException { public boolean isUnavailable(Node node) { lock.lock(); try { - return client.connectionFailed(node) && client.connectionDelay(node, time.milliseconds()) > 0; + return NetworkClientUtils.isUnavailable(client, node, time); } finally { lock.unlock(); } @@ -570,9 +571,7 @@ public boolean isUnavailable(Node node) { public void maybeThrowAuthFailure(Node node) { lock.lock(); try { - AuthenticationException exception = client.authenticationException(node); - if (exception != null) - throw exception; + NetworkClientUtils.maybeThrowAuthFailure(client, node); } finally { lock.unlock(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java index bc45c58a36dcc..241760d4c22dd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java @@ -34,13 +34,13 @@ /** * This is responsible for timing to send the next {@link FindCoordinatorRequest} based on the following criteria: - * + *
* Whether there is an existing coordinator. * Whether there is an inflight request. * Whether the backoff timer has expired. * The {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult} contains either a wait timer * or a singleton list of {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.UnsentRequest}. - * + *
* The {@link FindCoordinatorRequest} will be handled by the {@link #onResponse(long, FindCoordinatorResponse)} callback, which * subsequently invokes {@code onResponse} to handle the exception and response. Note that the coordinator node will be * marked {@code null} upon receiving a failure. @@ -70,7 +70,12 @@ public CoordinatorRequestManager( this.log = logContext.logger(this.getClass()); this.nonRetriableErrorHandler = errorHandler; this.groupId = groupId; - this.coordinatorRequestState = new RequestState(retryBackoffMs, retryBackoffMaxMs); + this.coordinatorRequestState = new RequestState( + logContext, + CoordinatorRequestManager.class.getSimpleName(), + retryBackoffMs, + retryBackoffMaxMs + ); } /** @@ -218,5 +223,4 @@ private void onResponse( public Optional coordinator() { return Optional.ofNullable(this.coordinator); } - } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java index d0656daeb6635..449d5a471cc9a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java @@ -17,18 +17,27 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.utils.ExponentialBackoff; +import org.apache.kafka.common.utils.LogContext; +import org.slf4j.Logger; class RequestState { + + private final Logger log; + protected final String owner; final static int RETRY_BACKOFF_EXP_BASE = 2; final static double RETRY_BACKOFF_JITTER = 0.2; - private final ExponentialBackoff exponentialBackoff; - private long lastSentMs = -1; - private long lastReceivedMs = -1; - private int numAttempts = 0; - private long backoffMs = 0; + protected final ExponentialBackoff exponentialBackoff; + protected long lastSentMs = -1; + protected long lastReceivedMs = -1; + protected int numAttempts = 0; + protected long backoffMs = 0; - public RequestState(final long retryBackoffMs, + public RequestState(final LogContext logContext, + final String owner, + final long retryBackoffMs, final long retryBackoffMaxMs) { + this.log = logContext.logger(RequestState.class); + this.owner = owner; this.exponentialBackoff = new ExponentialBackoff( retryBackoffMs, RETRY_BACKOFF_EXP_BASE, @@ -37,10 +46,14 @@ public RequestState(final long retryBackoffMs, } // Visible for testing - RequestState(final long retryBackoffMs, + RequestState(final LogContext logContext, + final String owner, + final long retryBackoffMs, final int retryBackoffExpBase, final long retryBackoffMaxMs, final double jitter) { + this.log = logContext.logger(RequestState.class); + this.owner = owner; this.exponentialBackoff = new ExponentialBackoff( retryBackoffMs, retryBackoffExpBase, @@ -65,13 +78,19 @@ public boolean canSendRequest(final long currentTimeMs) { return true; } - if (this.lastReceivedMs == -1 || - this.lastReceivedMs < this.lastSentMs) { - // there is an inflight request + if (this.lastReceivedMs == -1 || this.lastReceivedMs < this.lastSentMs) { + log.trace("An inflight request already exists for {}", this); return false; } - return requestBackoffExpired(currentTimeMs); + long remainingBackoffMs = remainingBackoffMs(currentTimeMs); + + if (remainingBackoffMs <= 0) { + return true; + } else { + log.trace("{} ms remain before another request should be sent for {}", remainingBackoffMs, this); + return false; + } } public void onSendAttempt(final long currentTimeMs) { @@ -105,12 
+124,29 @@ public void onFailedAttempt(final long currentTimeMs) { this.numAttempts++; } - private boolean requestBackoffExpired(final long currentTimeMs) { - return remainingBackoffMs(currentTimeMs) <= 0; - } - long remainingBackoffMs(final long currentTimeMs) { long timeSinceLastReceiveMs = currentTimeMs - this.lastReceivedMs; return Math.max(0, backoffMs - timeSinceLastReceiveMs); } -} + + /** + * This method appends the instance variables together in a simple String of comma-separated key value pairs. + * This allows subclasses to include these values and not have to duplicate each variable, helping to prevent + * any variables from being omitted when new ones are added. + * + * @return String version of instance variables. + */ + protected String toStringBase() { + return "owner='" + owner + '\'' + + ", exponentialBackoff=" + exponentialBackoff + + ", lastSentMs=" + lastSentMs + + ", lastReceivedMs=" + lastReceivedMs + + ", numAttempts=" + numAttempts + + ", backoffMs=" + backoffMs; + } + + @Override + public String toString() { + return "RequestState{" + toStringBase() + '}'; + } +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/errors/MismatchedEndpointTypeException.java b/clients/src/main/java/org/apache/kafka/common/errors/MismatchedEndpointTypeException.java new file mode 100644 index 0000000000000..a9a2a5ba71c90 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/MismatchedEndpointTypeException.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +public class MismatchedEndpointTypeException extends ApiException { + public MismatchedEndpointTypeException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnknownControllerIdException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnknownControllerIdException.java new file mode 100644 index 0000000000000..58d0c89014b11 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnknownControllerIdException.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.errors; + +public class UnknownControllerIdException extends ApiException { + public UnknownControllerIdException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedEndpointTypeException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedEndpointTypeException.java new file mode 100644 index 0000000000000..e786e740d9552 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedEndpointTypeException.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.errors; + +public class UnsupportedEndpointTypeException extends ApiException { + public UnsupportedEndpointTypeException(String message) { + super(message); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 97c382ca87f19..64bbe1557c43a 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -111,7 +111,9 @@ public enum ApiKeys { DESCRIBE_TRANSACTIONS(ApiMessageType.DESCRIBE_TRANSACTIONS), LIST_TRANSACTIONS(ApiMessageType.LIST_TRANSACTIONS), ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true), - CONSUMER_GROUP_HEARTBEAT(ApiMessageType.CONSUMER_GROUP_HEARTBEAT); + CONSUMER_GROUP_HEARTBEAT(ApiMessageType.CONSUMER_GROUP_HEARTBEAT), + CONSUMER_GROUP_DESCRIBE(ApiMessageType.CONSUMER_GROUP_DESCRIBE), + CONTROLLER_REGISTRATION(ApiMessageType.CONTROLLER_REGISTRATION); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 1ccdcd0627cb2..e2d57278ef881 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -78,6 +78,7 @@ import org.apache.kafka.common.errors.ListenerNotFoundException; import org.apache.kafka.common.errors.LogDirNotFoundException; import org.apache.kafka.common.errors.MemberIdRequiredException; +import org.apache.kafka.common.errors.MismatchedEndpointTypeException; import org.apache.kafka.common.errors.NetworkException; import org.apache.kafka.common.errors.NewLeaderElectedException; import org.apache.kafka.common.errors.NoReassignmentInProgressException; @@ -118,6 +119,7 @@ import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdNotFoundException; import org.apache.kafka.common.errors.UnacceptableCredentialException; +import org.apache.kafka.common.errors.UnknownControllerIdException; import org.apache.kafka.common.errors.UnknownLeaderEpochException; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.errors.UnknownProducerIdException; @@ -129,6 +131,7 @@ import org.apache.kafka.common.errors.UnsupportedAssignorException; import org.apache.kafka.common.errors.UnsupportedByAuthenticationException; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; +import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.errors.UnsupportedSaslMechanismException; import org.apache.kafka.common.errors.UnsupportedVersionException; @@ -380,7 +383,10 @@ public enum Errors { FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoin.", FencedMemberEpochException::new), UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. 
That member must leave first.", UnreleasedInstanceIdException::new), UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new), - STALE_MEMBER_EPOCH(113, "The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API.", StaleMemberEpochException::new); + STALE_MEMBER_EPOCH(113, "The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API.", StaleMemberEpochException::new), + MISMATCHED_ENDPOINT_TYPE(114, "The request was sent to an endpoint of the wrong type.", MismatchedEndpointTypeException::new), + UNSUPPORTED_ENDPOINT_TYPE(115, "This endpoint type is not supported yet.", UnsupportedEndpointTypeException::new), + UNKNOWN_CONTROLLER_ID(116, "This controller ID is not known.", UnknownControllerIdException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index 64f1c2e4a2f84..406159732697e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -312,6 +312,10 @@ private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, return AllocateProducerIdsRequest.parse(buffer, apiVersion); case CONSUMER_GROUP_HEARTBEAT: return ConsumerGroupHeartbeatRequest.parse(buffer, apiVersion); + case CONSUMER_GROUP_DESCRIBE: + return ConsumerGroupDescribeRequest.parse(buffer, apiVersion); + case CONTROLLER_REGISTRATION: + return ControllerRegistrationRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index 4b55a6d582d11..bcf1ac895c580 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -249,6 +249,10 @@ public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer response return AllocateProducerIdsResponse.parse(responseBuffer, version); case CONSUMER_GROUP_HEARTBEAT: return ConsumerGroupHeartbeatResponse.parse(responseBuffer, version); + case CONSUMER_GROUP_DESCRIBE: + return ConsumerGroupDescribeResponse.parse(responseBuffer, version); + case CONTROLLER_REGISTRATION: + return ControllerRegistrationResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java new file mode 100644 index 0000000000000..862d9c9d4e497 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ConsumerGroupDescribeRequestData; +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; + +public class ConsumerGroupDescribeRequest extends AbstractRequest { + + public static class Builder extends AbstractRequest.Builder { + + private final ConsumerGroupDescribeRequestData data; + + public Builder(ConsumerGroupDescribeRequestData data) { + this(data, false); + } + + public Builder(ConsumerGroupDescribeRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.CONSUMER_GROUP_DESCRIBE, enableUnstableLastVersion); + this.data = data; + } + + @Override + public ConsumerGroupDescribeRequest build(short version) { + return new ConsumerGroupDescribeRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ConsumerGroupDescribeRequestData data; + + public ConsumerGroupDescribeRequest(ConsumerGroupDescribeRequestData data, short version) { + super(ApiKeys.CONSUMER_GROUP_DESCRIBE, version); + this.data = data; + } + + @Override + public ConsumerGroupDescribeResponse getErrorResponse(int throttleTimeMs, Throwable e) { + ConsumerGroupDescribeResponseData data = new ConsumerGroupDescribeResponseData() + .setThrottleTimeMs(throttleTimeMs); + // Set error for each group + this.data.groupIds().forEach( + groupId -> data.groups().add( + new ConsumerGroupDescribeResponseData.DescribedGroup() + .setGroupId(groupId) + .setErrorCode(Errors.forException(e).code()) + ) + ); + return new ConsumerGroupDescribeResponse(data); + } + + @Override + public ConsumerGroupDescribeRequestData data() { + return data; + } + + public static ConsumerGroupDescribeRequest parse(ByteBuffer buffer, short version) { + return new ConsumerGroupDescribeRequest( + new ConsumerGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), + version + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java new file mode 100644 index 0000000000000..70456e7b0240b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/** + * Possible error codes. + * + * - {@link Errors#GROUP_AUTHORIZATION_FAILED} + * - {@link Errors#NOT_COORDINATOR} + * - {@link Errors#COORDINATOR_NOT_AVAILABLE} + * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS} + * - {@link Errors#INVALID_REQUEST} + * - {@link Errors#INVALID_GROUP_ID} + * - {@link Errors#GROUP_ID_NOT_FOUND} + */ +public class ConsumerGroupDescribeResponse extends AbstractResponse { + + private final ConsumerGroupDescribeResponseData data; + + public ConsumerGroupDescribeResponse(ConsumerGroupDescribeResponseData data) { + super(ApiKeys.CONSUMER_GROUP_DESCRIBE); + this.data = data; + } + + @Override + public ConsumerGroupDescribeResponseData data() { + return data; + } + + @Override + public Map errorCounts() { + HashMap counts = new HashMap<>(); + data.groups().forEach( + group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) + ); + return counts; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public static ConsumerGroupDescribeResponse parse(ByteBuffer buffer, short version) { + return new ConsumerGroupDescribeResponse( + new ConsumerGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) + ); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java new file mode 100644 index 0000000000000..34cbef09294f8 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ControllerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; + +public class ControllerRegistrationRequest extends AbstractRequest { + public static class Builder extends AbstractRequest.Builder { + private final ControllerRegistrationRequestData data; + + public Builder(ControllerRegistrationRequestData data) { + super(ApiKeys.CONTROLLER_REGISTRATION); + this.data = data; + } + + @Override + public ControllerRegistrationRequest build(short version) { + return new ControllerRegistrationRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ControllerRegistrationRequestData data; + + public ControllerRegistrationRequest(ControllerRegistrationRequestData data, short version) { + super(ApiKeys.CONTROLLER_REGISTRATION, version); + this.data = data; + } + + @Override + public ControllerRegistrationRequestData data() { + return data; + } + + @Override + public ControllerRegistrationResponse getErrorResponse(int throttleTimeMs, Throwable e) { + Errors error = Errors.forException(e); + return new ControllerRegistrationResponse(new ControllerRegistrationResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code()) + .setErrorMessage(error.message())); + } + + public static ControllerRegistrationRequest parse(ByteBuffer buffer, short version) { + return new ControllerRegistrationRequest( + new ControllerRegistrationRequestData(new ByteBufferAccessor(buffer), version), + version); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java new file mode 100644 index 0000000000000..d44e915b5fa5a --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ControllerRegistrationResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Map; + +public class ControllerRegistrationResponse extends AbstractResponse { + private final ControllerRegistrationResponseData data; + + public ControllerRegistrationResponse(ControllerRegistrationResponseData data) { + super(ApiKeys.CONTROLLER_REGISTRATION); + this.data = data; + } + + @Override + public ControllerRegistrationResponseData data() { + return data; + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + @Override + public Map errorCounts() { + return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); + } + + public static ControllerRegistrationResponse parse(ByteBuffer buffer, short version) { + return new ControllerRegistrationResponse( + new ControllerRegistrationResponseData(new ByteBufferAccessor(buffer), version)); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ExponentialBackoff.java b/clients/src/main/java/org/apache/kafka/common/utils/ExponentialBackoff.java index 1c4482660a9b6..15e0cf2ba4b33 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ExponentialBackoff.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ExponentialBackoff.java @@ -58,4 +58,14 @@ public long backoff(long attempts) { long backoffValue = (long) (randomFactor * term); return backoffValue > maxInterval ? maxInterval : backoffValue; } + + @Override + public String toString() { + return "ExponentialBackoff{" + + "multiplier=" + multiplier + + ", expMax=" + expMax + + ", initialInterval=" + initialInterval + + ", jitter=" + jitter + + '}'; + } } diff --git a/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json b/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json new file mode 100644 index 0000000000000..1d7842a1206ee --- /dev/null +++ b/clients/src/main/resources/common/message/ConsumerGroupDescribeRequest.json @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 69, + "type": "request", + "listeners": ["zkBroker", "broker"], + "name": "ConsumerGroupDescribeRequest", + "validVersions": "0", + // The ConsumerGroupDescribe API is added as part of KIP-848 and is still + // under development. Hence, the API is not exposed by default by brokers + // unless explicitly enabled. 
+ "latestVersionUnstable": true, + "flexibleVersions": "0+", + "fields": [ + { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", + "about": "The ids of the groups to describe" }, + { "name": "IncludeAuthorizedOperations", "type": "bool", "versions": "0+", + "about": "Whether to include authorized operations." } + ] +} diff --git a/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json b/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json new file mode 100644 index 0000000000000..1dea4e2ead682 --- /dev/null +++ b/clients/src/main/resources/common/message/ConsumerGroupDescribeResponse.json @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 69, + "type": "response", + "name": "ConsumerGroupDescribeResponse", + "validVersions": "0", + "flexibleVersions": "0+", + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - INVALID_GROUP_ID (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "Groups", "type": "[]DescribedGroup", "versions": "0+", + "about": "Each described group.", + "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The describe error, or 0 if there was no error." }, + { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The top-level error message, or null if there was no error." }, + { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", + "about": "The group ID string." }, + { "name": "GroupState", "type": "string", "versions": "0+", + "about": "The group state string, or the empty string." }, + { "name": "GroupEpoch", "type": "int32", "versions": "0+", + "about": "The group epoch." }, + { "name": "AssignmentEpoch", "type": "int32", "versions": "0+", + "about": "The assignment epoch." }, + { "name": "AssignorName", "type": "string", "versions": "0+", + "about": "The selected assignor." }, + { "name": "Members", "type": "[]Member", "versions": "0+", + "about": "The members.", + "fields": [ + { "name": "MemberId", "type": "uuid", "versions": "0+", + "about": "The member ID." }, + { "name": "InstanceId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The member instance ID." 
}, + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "The member rack ID." }, + { "name": "MemberEpoch", "type": "int32", "versions": "0+", + "about": "The current member epoch." }, + { "name": "ClientId", "type": "string", "versions": "0+", + "about": "The client ID." }, + { "name": "ClientHost", "type": "string", "versions": "0+", + "about": "The client host." }, + { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", + "about": "The subscribed topic names." }, + { "name": "SubscribedTopicRegex", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + "about": "the subscribed topic regex otherwise or null of not provided." }, + { "name": "Assignment", "type": "Assignment", "versions": "0+", + "about": "The current assignment." }, + { "name": "TargetAssignment", "type": "Assignment", "versions": "0+", + "about": "The target assignment." } + ]}, + { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "-2147483648", + "about": "32-bit bitfield to represent authorized operations for this group." } + ] + } + ], + "commonStructs": [ + { "name": "TopicPartitions", "versions": "0+", "fields": [ + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The topic ID." }, + { "name": "TopicName", "type": "string", "versions": "0+", + "about": "The topic name." }, + { "name": "Partitions", "type": "[]int32", "versions": "0+", + "about": "The partitions." } + ]}, + { "name": "Assignment", "versions": "0+", "fields": [ + { "name": "TopicPartitions", "type": "[]TopicPartitions", "versions": "0+", + "about": "The assigned topic-partitions to the member." }, + { "name": "Error", "type": "int8", "versions": "0+", + "about": "The assigned error." }, + { "name": "MetadataVersion", "type": "int32", "versions": "0+", + "about": "The assignor metadata version." }, + { "name": "MetadataBytes", "type": "bytes", "versions": "0+", + "about": "The assignor metadata bytes." } + ]} + ] +} diff --git a/clients/src/main/resources/common/message/ControllerRegistrationRequest.json b/clients/src/main/resources/common/message/ControllerRegistrationRequest.json new file mode 100644 index 0000000000000..56647a0d4b720 --- /dev/null +++ b/clients/src/main/resources/common/message/ControllerRegistrationRequest.json @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 70, + "type": "request", + "listeners": ["controller"], + "name": "ControllerRegistrationRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ControllerId", "type": "int32", "versions": "0+", + "about": "The ID of the controller to register." 
}, + { "name": "IncarnationId", "type": "uuid", "versions": "0+", + "about": "The controller incarnation ID, which is unique to each process run." }, + { "name": "ZkMigrationReady", "type": "bool", "versions": "0+", + "about": "Set if the required configurations for ZK migration are present." }, + { "name": "Listeners", "type": "[]Listener", + "about": "The listeners of this controller", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, + "about": "The name of the endpoint." }, + { "name": "Host", "type": "string", "versions": "0+", + "about": "The hostname." }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "The port." }, + { "name": "SecurityProtocol", "type": "int16", "versions": "0+", + "about": "The security protocol." } + ]}, + { "name": "Features", "type": "[]Feature", + "about": "The features on this controller", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, + "about": "The feature name." }, + { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", + "about": "The minimum supported feature level." }, + { "name": "MaxSupportedVersion", "type": "int16", "versions": "0+", + "about": "The maximum supported feature level." } + ]} + ] +} diff --git a/clients/src/main/resources/common/message/ControllerRegistrationResponse.json b/clients/src/main/resources/common/message/ControllerRegistrationResponse.json new file mode 100644 index 0000000000000..f69932a375c1f --- /dev/null +++ b/clients/src/main/resources/common/message/ControllerRegistrationResponse.json @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 70, + "type": "response", + "name": "ControllerRegistrationResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The response error code." }, + { "name": "ErrorMessage", "type": "string", "nullableVersions": "0+", "versions": "0+", + "about": "The response error message, or null if there was no error." 
} + ] +} diff --git a/clients/src/main/resources/common/message/DescribeClusterRequest.json b/clients/src/main/resources/common/message/DescribeClusterRequest.json index 192e4d87d4497..34ebe013bb1a0 100644 --- a/clients/src/main/resources/common/message/DescribeClusterRequest.json +++ b/clients/src/main/resources/common/message/DescribeClusterRequest.json @@ -16,12 +16,17 @@ { "apiKey": 60, "type": "request", - "listeners": ["zkBroker", "broker"], + "listeners": ["zkBroker", "broker", "controller"], "name": "DescribeClusterRequest", - "validVersions": "0", + // + // Version 1 adds EndpointType for KIP-919 support. + // + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "IncludeClusterAuthorizedOperations", "type": "bool", "versions": "0+", - "about": "Whether to include cluster authorized operations." } + "about": "Whether to include cluster authorized operations." }, + { "name": "EndpointType", "type": "int8", "versions": "1+", "default": "1", + "about": "The endpoint type to describe. 1=brokers, 2=controllers." } ] } diff --git a/clients/src/main/resources/common/message/DescribeClusterResponse.json b/clients/src/main/resources/common/message/DescribeClusterResponse.json index 084ff5410496b..6cccd1d26c471 100644 --- a/clients/src/main/resources/common/message/DescribeClusterResponse.json +++ b/clients/src/main/resources/common/message/DescribeClusterResponse.json @@ -17,7 +17,11 @@ "apiKey": 60, "type": "response", "name": "DescribeClusterResponse", - "validVersions": "0", + // + // Version 1 adds the EndpointType field, and makes MISMATCHED_ENDPOINT_TYPE and + // UNSUPPORTED_ENDPOINT_TYPE valid top-level response error codes. + // + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", @@ -26,6 +30,8 @@ "about": "The top-level error code, or 0 if there was no error" }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, + { "name": "EndpointType", "type": "int8", "versions": "1+", "default": "1", + "about": "The endpoint type that was described. 1=brokers, 2=controllers." }, { "name": "ClusterId", "type": "string", "versions": "0+", "about": "The cluster ID that responding broker belongs to." }, { "name": "ControllerId", "type": "int32", "versions": "0+", "default": "-1", "entityType": "brokerId", diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/EndpointTypeTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/EndpointTypeTest.java new file mode 100644 index 0000000000000..9ebeb39213dbb --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/clients/admin/EndpointTypeTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.clients.admin; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.assertEquals; + + +@Timeout(60) +public class EndpointTypeTest { + @Test + public void testRoundTripBroker() { + testRoundTrip(EndpointType.BROKER); + } + + @Test + public void testRoundTripController() { + testRoundTrip(EndpointType.CONTROLLER); + } + + @Test + public void testUnknown() { + assertEquals(EndpointType.UNKNOWN, EndpointType.fromId((byte) 0)); + assertEquals(EndpointType.UNKNOWN, EndpointType.fromId((byte) 3)); + } + + private void testRoundTrip(EndpointType type) { + byte id = type.id(); + assertEquals(type, EndpointType.fromId(id)); + } +} diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index 17694a16a5a63..f942fc699010d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -213,6 +213,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -558,12 +560,16 @@ private static FindCoordinatorResponse prepareBatchedFindCoordinatorResponse(Err } private static MetadataResponse prepareMetadataResponse(Cluster cluster, Errors error) { + return prepareMetadataResponse(cluster, error, error); + } + + private static MetadataResponse prepareMetadataResponse(Cluster cluster, Errors topicError, Errors partitionError) { List metadata = new ArrayList<>(); for (String topic : cluster.topics()) { List pms = new ArrayList<>(); for (PartitionInfo pInfo : cluster.availablePartitionsForTopic(topic)) { MetadataResponsePartition pm = new MetadataResponsePartition() - .setErrorCode(error.code()) + .setErrorCode(partitionError.code()) .setPartitionIndex(pInfo.partition()) .setLeaderId(pInfo.leader().id()) .setLeaderEpoch(234) @@ -573,7 +579,7 @@ private static MetadataResponse prepareMetadataResponse(Cluster cluster, Errors pms.add(pm); } MetadataResponseTopic tm = new MetadataResponseTopic() - .setErrorCode(error.code()) + .setErrorCode(topicError.code()) .setName(topic) .setIsInternal(false) .setPartitions(pms); @@ -5462,7 +5468,6 @@ public void testDescribeMetadataQuorumFailure() { @Test public void testListOffsetsMetadataRetriableErrors() throws Exception { - Node node0 = new Node(0, "localhost", 8120); Node node1 = new Node(1, "localhost", 8121); List nodes = Arrays.asList(node0, node1); @@ -5485,7 +5490,8 @@ public void testListOffsetsMetadataRetriableErrors() throws Exception { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.LEADER_NOT_AVAILABLE)); - env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION)); + // We retry when a partition of a topic (but not the topic itself) is unknown + env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE, Errors.UNKNOWN_TOPIC_OR_PARTITION)); 
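// Annotation (not part of the patch): a partition-level UNKNOWN_TOPIC_OR_PARTITION with the topic itself
// resolving is treated as retriable, so the admin client is expected to fetch metadata once more, which is
// why the next prepared metadata response below is fully successful.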
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); // listoffsets response from broker 0 @@ -5636,9 +5642,13 @@ public void testListOffsetsWithLeaderChange() throws Exception { } } - @Test - public void testListOffsetsMetadataNonRetriableErrors() throws Exception { - + @ParameterizedTest + @MethodSource("listOffsetsMetadataNonRetriableErrors") + public void testListOffsetsMetadataNonRetriableErrors( + Errors topicMetadataError, + Errors partitionMetadataError, + Class expectedFailure + ) throws Exception { Node node0 = new Node(0, "localhost", 8120); Node node1 = new Node(1, "localhost", 8121); List nodes = Arrays.asList(node0, node1); @@ -5654,18 +5664,49 @@ public void testListOffsetsMetadataNonRetriableErrors() throws Exception { node0); final TopicPartition tp1 = new TopicPartition("foo", 0); + final MetadataResponse preparedResponse = prepareMetadataResponse( + cluster, topicMetadataError, partitionMetadataError + ); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.TOPIC_AUTHORIZATION_FAILED)); + env.kafkaClient().prepareResponse(preparedResponse); Map partitions = new HashMap<>(); partitions.put(tp1, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); - TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class); - } + TestUtils.assertFutureError(result.all(), expectedFailure); + } + } + + private static Stream listOffsetsMetadataNonRetriableErrors() { + return Stream.of( + Arguments.of( + Errors.TOPIC_AUTHORIZATION_FAILED, + Errors.TOPIC_AUTHORIZATION_FAILED, + TopicAuthorizationException.class + ), + Arguments.of( + // We fail fast when the entire topic is unknown... + Errors.UNKNOWN_TOPIC_OR_PARTITION, + Errors.NONE, + UnknownTopicOrPartitionException.class + ), + Arguments.of( + // ... even if a partition in the topic is also somehow reported as unknown... + Errors.UNKNOWN_TOPIC_OR_PARTITION, + Errors.UNKNOWN_TOPIC_OR_PARTITION, + UnknownTopicOrPartitionException.class + ), + Arguments.of( + // ... 
or a partition in the topic has a different, otherwise-retriable error + Errors.UNKNOWN_TOPIC_OR_PARTITION, + Errors.LEADER_NOT_AVAILABLE, + UnknownTopicOrPartitionException.class + ) + ); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestStateTest.java index 344df5df92b12..365fb1a7e6a8d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestStateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestStateTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.common.utils.LogContext; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -25,6 +26,8 @@ public class RequestStateTest { @Test public void testRequestStateSimple() { RequestState state = new RequestState( + new LogContext(), + this.getClass().getSimpleName(), 100, 2, 1000, diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequestTest.java new file mode 100644 index 0000000000000..81255da2b113c --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequestTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ConsumerGroupDescribeRequestData; +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; +import org.apache.kafka.common.protocol.Errors; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConsumerGroupDescribeRequestTest { + + @Test + void testGetErrorResponse() { + List groupIds = Arrays.asList("group0", "group1"); + ConsumerGroupDescribeRequestData data = new ConsumerGroupDescribeRequestData(); + data.groupIds().addAll(groupIds); + ConsumerGroupDescribeRequest request = new ConsumerGroupDescribeRequest.Builder(data, true) + .build(); + Throwable e = Errors.GROUP_AUTHORIZATION_FAILED.exception(); + int throttleTimeMs = 1000; + + ConsumerGroupDescribeResponse response = request.getErrorResponse(throttleTimeMs, e); + + assertEquals(throttleTimeMs, response.throttleTimeMs()); + for (int i = 0; i < groupIds.size(); i++) { + ConsumerGroupDescribeResponseData.DescribedGroup group = response.data().groups().get(i); + assertEquals(groupIds.get(i), group.groupId()); + assertEquals(Errors.forException(e).code(), group.errorCode()); + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponseTest.java new file mode 100644 index 0000000000000..ed3d87bd96308 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponseTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; +import org.apache.kafka.common.protocol.Errors; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class ConsumerGroupDescribeResponseTest { + + @Test + void testErrorCounts() { + Errors e = Errors.INVALID_GROUP_ID; + int errorCount = 2; + ConsumerGroupDescribeResponseData data = new ConsumerGroupDescribeResponseData(); + for (int i = 0; i < errorCount; i++) { + data.groups().add( + new ConsumerGroupDescribeResponseData.DescribedGroup() + .setErrorCode(e.code()) + ); + } + ConsumerGroupDescribeResponse response = new ConsumerGroupDescribeResponse(data); + + Map counts = response.errorCounts(); + + assertEquals(errorCount, counts.get(e)); + assertNull(counts.get(Errors.COORDINATOR_NOT_AVAILABLE)); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index c8bd3563b53cf..c70b592c628bf 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -67,12 +67,16 @@ import org.apache.kafka.common.message.BrokerHeartbeatResponseData; import org.apache.kafka.common.message.BrokerRegistrationRequestData; import org.apache.kafka.common.message.BrokerRegistrationResponseData; +import org.apache.kafka.common.message.ConsumerGroupDescribeRequestData; +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.message.ControlledShutdownRequestData; import org.apache.kafka.common.message.ControlledShutdownResponseData; import org.apache.kafka.common.message.ControlledShutdownResponseData.RemainingPartition; import org.apache.kafka.common.message.ControlledShutdownResponseData.RemainingPartitionCollection; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationResponseData; import org.apache.kafka.common.message.CreateAclsRequestData; import org.apache.kafka.common.message.CreateAclsResponseData; import org.apache.kafka.common.message.CreateDelegationTokenRequestData; @@ -1057,6 +1061,8 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case LIST_TRANSACTIONS: return createListTransactionsRequest(version); case ALLOCATE_PRODUCER_IDS: return createAllocateProducerIdsRequest(version); case CONSUMER_GROUP_HEARTBEAT: return createConsumerGroupHeartbeatRequest(version); + case CONSUMER_GROUP_DESCRIBE: return createConsumerGroupDescribeRequest(version); + case CONTROLLER_REGISTRATION: return createControllerRegistrationRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1132,10 +1138,36 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case LIST_TRANSACTIONS: return createListTransactionsResponse(); case ALLOCATE_PRODUCER_IDS: return createAllocateProducerIdsResponse(); case CONSUMER_GROUP_HEARTBEAT: return createConsumerGroupHeartbeatResponse(); + case CONSUMER_GROUP_DESCRIBE: return createConsumerGroupDescribeResponse(); + case 
CONTROLLER_REGISTRATION: return createControllerRegistrationResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } + private ConsumerGroupDescribeRequest createConsumerGroupDescribeRequest(short version) { + ConsumerGroupDescribeRequestData data = new ConsumerGroupDescribeRequestData() + .setGroupIds(Collections.singletonList("group")) + .setIncludeAuthorizedOperations(false); + return new ConsumerGroupDescribeRequest.Builder(data).build(version); + } + + private ConsumerGroupDescribeResponse createConsumerGroupDescribeResponse() { + ConsumerGroupDescribeResponseData data = new ConsumerGroupDescribeResponseData() + .setGroups(Collections.singletonList( + new ConsumerGroupDescribeResponseData.DescribedGroup() + .setGroupId("group") + .setErrorCode((short) 0) + .setErrorMessage(Errors.forCode((short) 0).message()) + .setGroupState(ConsumerGroupState.EMPTY.toString()) + .setGroupEpoch(0) + .setAssignmentEpoch(0) + .setAssignorName("range") + .setMembers(new ArrayList(0)) + )) + .setThrottleTimeMs(1000); + return new ConsumerGroupDescribeResponse(data); + } + private ConsumerGroupHeartbeatRequest createConsumerGroupHeartbeatRequest(short version) { ConsumerGroupHeartbeatRequestData data = new ConsumerGroupHeartbeatRequestData() .setGroupId("group") @@ -1176,6 +1208,38 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse() { return new ConsumerGroupHeartbeatResponse(data); } + private ControllerRegistrationRequest createControllerRegistrationRequest(short version) { + ControllerRegistrationRequestData data = new ControllerRegistrationRequestData(). + setControllerId(3). + setIncarnationId(Uuid.fromString("qiTdnbu6RPazh1Aufq4dxw")). + setZkMigrationReady(true). + setFeatures(new ControllerRegistrationRequestData.FeatureCollection( + Arrays.asList( + new ControllerRegistrationRequestData.Feature(). + setName("metadata.version"). + setMinSupportedVersion((short) 1). + setMaxSupportedVersion((short) 15) + ).iterator() + )). + setListeners(new ControllerRegistrationRequestData.ListenerCollection( + Arrays.asList( + new ControllerRegistrationRequestData.Listener(). + setName("CONTROLLER"). + setHost("localhost"). + setPort(9012). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id) + ).iterator() + )); + return new ControllerRegistrationRequest(data, version); + } + + private ControllerRegistrationResponse createControllerRegistrationResponse() { + ControllerRegistrationResponseData data = new ControllerRegistrationResponseData(). + setErrorCode(Errors.NONE.code()). + setThrottleTimeMs(1000); + return new ControllerRegistrationResponse(data); + } + private FetchSnapshotRequest createFetchSnapshotRequest(short version) { FetchSnapshotRequestData data = new FetchSnapshotRequestData() .setClusterId("clusterId") diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSinkConnectorIntegrationTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSinkConnectorIntegrationTest.java new file mode 100644 index 0000000000000..433c2004710c2 --- /dev/null +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSinkConnectorIntegrationTest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.connect.file.integration; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.file.FileStreamSinkConnector; +import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster; +import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStreamReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.kafka.connect.file.FileStreamSinkConnector.FILE_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; +import static org.apache.kafka.connect.sink.SinkConnector.TOPICS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@Tag("integration") +public class FileStreamSinkConnectorIntegrationTest { + + private static final String CONNECTOR_NAME = "test-connector"; + private static final String TOPIC = "test-topic"; + private static final String MESSAGE_PREFIX = "Message "; + private static final int NUM_MESSAGES = 5; + private static final String FILE_NAME = "test-file"; + private final EmbeddedConnectCluster connect = new EmbeddedConnectCluster.Builder().build(); + + @BeforeEach + public void setup() { + connect.start(); + connect.kafka().createTopic(TOPIC); + produceMessagesToTopic(TOPIC, NUM_MESSAGES); + } + + @AfterEach + public void tearDown() { + connect.stop(); + } + + @Test + public void testSimpleSink() throws Exception { + File tempDir = TestUtils.tempDirectory(); + Path tempFilePath = tempDir.toPath().resolve(FILE_NAME); + Map connectorConfigs = baseConnectorConfigs(TOPIC, tempFilePath.toString()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + verifyLinesInFile(tempFilePath, NUM_MESSAGES, true); + } + + @Test + public void testAlterOffsets() throws Exception { + File tempDir = TestUtils.tempDirectory(); + Path tempFilePath = tempDir.toPath().resolve(FILE_NAME); + Map connectorConfigs = baseConnectorConfigs(TOPIC, tempFilePath.toString()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + verifyLinesInFile(tempFilePath, NUM_MESSAGES, true); + + connect.stopConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorIsStopped(CONNECTOR_NAME, "Connector did not stop in time"); + + // Alter the offsets to 
cause the last message in the topic to be re-processed + connect.alterSinkConnectorOffset(CONNECTOR_NAME, new TopicPartition(TOPIC, 0), (long) (NUM_MESSAGES - 1)); + + connect.resumeConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not resume in time"); + + // The last message should be re-processed when the connector is resumed after the offsets are altered + verifyLinesInFile(tempFilePath, NUM_MESSAGES + 1, false); + } + + @Test + public void testResetOffsets() throws Exception { + File tempDir = TestUtils.tempDirectory(); + Path tempFilePath = tempDir.toPath().resolve(FILE_NAME); + Map connectorConfigs = baseConnectorConfigs(TOPIC, tempFilePath.toString()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + verifyLinesInFile(tempFilePath, NUM_MESSAGES, true); + + connect.stopConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorIsStopped(CONNECTOR_NAME, "Connector did not stop in time"); + + // Reset the offsets to cause all the message in the topic to be re-processed + connect.resetConnectorOffsets(CONNECTOR_NAME); + + connect.resumeConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not resume in time"); + + // All the messages should be re-processed when the connector is resumed after the offsets are reset + verifyLinesInFile(tempFilePath, 2 * NUM_MESSAGES, false); + } + + @Test + public void testSinkMultipleTopicsWithMultipleTasks() throws Exception { + String topic2 = "test-topic-2"; + connect.kafka().createTopic(topic2); + produceMessagesToTopic(topic2, NUM_MESSAGES); + + File tempDir = TestUtils.tempDirectory(); + Path tempFilePath = tempDir.toPath().resolve(FILE_NAME); + Map connectorConfigs = baseConnectorConfigs(TOPIC + "," + topic2, tempFilePath.toString()); + connectorConfigs.put(TASKS_MAX_CONFIG, "2"); + + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 2, + "Connector and task did not start in time"); + + // Only verify the number of lines since the messages can be consumed in any order across the two topics + verifyLinesInFile(tempFilePath, 2 * NUM_MESSAGES, false); + } + + private void produceMessagesToTopic(String topic, int numMessages) { + for (int i = 0; i < numMessages; i++) { + connect.kafka().produce(topic, MESSAGE_PREFIX + i); + } + } + + private Map baseConnectorConfigs(String topics, String filePath) { + Map connectorConfigs = new HashMap<>(); + connectorConfigs.put(CONNECTOR_CLASS_CONFIG, FileStreamSinkConnector.class.getName()); + connectorConfigs.put(TOPICS_CONFIG, topics); + connectorConfigs.put(FILE_CONFIG, filePath); + return connectorConfigs; + } + + /** + * Verify that the number of lines in the file at {@code filePath} is equal to {@code numLines} and that they all begin with the + * prefix {@link #MESSAGE_PREFIX}. + *
<p>
+ * If {@code verifyLinearity} is true, this method will also verify that the lines have a linearly increasing message number + * (beginning with 0) after the prefix. + * + * @param filePath the file path + * @param numLines the expected number of lines in the file + * @param verifyLinearity true if the line contents are to be verified + */ + private void verifyLinesInFile(Path filePath, int numLines, boolean verifyLinearity) throws Exception { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(Files.newInputStream(filePath)))) { + AtomicInteger i = new AtomicInteger(0); + TestUtils.waitForCondition(() -> { + reader.lines().forEach(line -> { + if (verifyLinearity) { + assertEquals(MESSAGE_PREFIX + i, line); + } else { + assertTrue(line.startsWith(MESSAGE_PREFIX)); + } + i.getAndIncrement(); + }); + + return i.get() >= numLines; + }, "Expected to read " + numLines + " lines from the file"); + } + + // Ensure that there are exactly the expected number of lines present + assertEquals(numLines, Files.readAllLines(filePath).size()); + } +} diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java new file mode 100644 index 0000000000000..95dabf703c585 --- /dev/null +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.file.integration; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.connect.file.FileStreamSourceConnector; +import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster; +import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.PrintStream; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.apache.kafka.connect.file.FileStreamSourceConnector.FILE_CONFIG; +import static org.apache.kafka.connect.file.FileStreamSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.file.FileStreamSourceTask.FILENAME_FIELD; +import static org.apache.kafka.connect.file.FileStreamSourceTask.POSITION_FIELD; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; + +@Tag("integration") +public class FileStreamSourceConnectorIntegrationTest { + + private static final String CONNECTOR_NAME = "test-connector"; + private static final String TOPIC = "test-topic"; + private static final String LINE_FORMAT = "Line %d"; + private static final int NUM_LINES = 5; + private static final long TIMEOUT_MS = TimeUnit.SECONDS.toMillis(15); + private final EmbeddedConnectCluster connect = new EmbeddedConnectCluster.Builder().build(); + private File sourceFile; + + @BeforeEach + public void setup() throws Exception { + connect.start(); + sourceFile = createTempFile(NUM_LINES); + connect.kafka().createTopic(TOPIC); + } + + @AfterEach + public void tearDown() { + connect.stop(); + } + + @Test + public void testSimpleSource() throws Exception { + Map connectorConfigs = baseConnectorConfigs(sourceFile.getAbsolutePath()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + int i = 0; + for (ConsumerRecord record : connect.kafka().consume(NUM_LINES, TIMEOUT_MS, TOPIC)) { + assertEquals(String.format(LINE_FORMAT, i++), new String(record.value())); + } + } + + @Test + public void testStopResumeSavedOffset() throws Exception { + Map connectorConfigs = baseConnectorConfigs(sourceFile.getAbsolutePath()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + // Wait for the initially written records to be sourced by the connector and produced to the configured Kafka topic + connect.kafka().consume(NUM_LINES, TIMEOUT_MS, TOPIC); + + connect.stopConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorIsStopped(CONNECTOR_NAME, "Connector did not stop in time"); + + // Append NUM_LINES more lines to the file + try (PrintStream printStream = new PrintStream(Files.newOutputStream(sourceFile.toPath(), StandardOpenOption.APPEND))) { + for (int i = NUM_LINES; i < 2 * NUM_LINES; i++) { + printStream.println(String.format(LINE_FORMAT, i)); + } + } + + connect.resumeConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not 
resume in time"); + + int i = 0; + for (ConsumerRecord record : connect.kafka().consume(2 * NUM_LINES, TIMEOUT_MS, TOPIC)) { + assertEquals(String.format(LINE_FORMAT, i++), new String(record.value())); + } + + // We expect exactly (2 * NUM_LINES) messages to be produced since the connector should continue from where it left off on being resumed. + // We verify this by consuming all the messages from the topic after we've already ensured that at least (2 * NUM_LINES) messages can be + // consumed above. + assertEquals(2 * NUM_LINES, connect.kafka().consumeAll(TIMEOUT_MS, TOPIC).count()); + } + + @Test + public void testAlterOffsets() throws Exception { + Map connectorConfigs = baseConnectorConfigs(sourceFile.getAbsolutePath()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + // Wait for the initially written records to be sourced by the connector and produced to the configured Kafka topic + connect.kafka().consume(NUM_LINES, TIMEOUT_MS, TOPIC); + + connect.stopConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorIsStopped(CONNECTOR_NAME, "Connector did not stop in time"); + + // Alter the offsets to make the connector re-process the last line in the file + connect.alterSourceConnectorOffset( + CONNECTOR_NAME, + Collections.singletonMap(FILENAME_FIELD, sourceFile.getAbsolutePath()), + Collections.singletonMap(POSITION_FIELD, 28L) + ); + + connect.resumeConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not resume in time"); + + Iterator> recordIterator = connect.kafka().consume(NUM_LINES + 1, TIMEOUT_MS, TOPIC).iterator(); + + for (int i = 0; i < NUM_LINES; i++) { + assertEquals(String.format(LINE_FORMAT, i), new String(recordIterator.next().value())); + } + + // Verify that the last line has been sourced again after the alter offsets request + assertEquals(String.format(LINE_FORMAT, NUM_LINES - 1), new String(recordIterator.next().value())); + } + + @Test + public void testResetOffsets() throws Exception { + Map connectorConfigs = baseConnectorConfigs(sourceFile.getAbsolutePath()); + connect.configureConnector(CONNECTOR_NAME, connectorConfigs); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not start in time"); + + // Wait for the initially written records to be sourced by the connector and produced to the configured Kafka topic + connect.kafka().consume(NUM_LINES, TIMEOUT_MS, TOPIC); + + connect.stopConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorIsStopped(CONNECTOR_NAME, "Connector did not stop in time"); + + // Reset the offsets to make the connector re-read all the previously written lines + connect.resetConnectorOffsets(CONNECTOR_NAME); + + connect.resumeConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorAndExactlyNumTasksAreRunning(CONNECTOR_NAME, 1, + "Connector and task did not resume in time"); + + Iterator> recordIterator = connect.kafka().consume(2 * NUM_LINES, TIMEOUT_MS, TOPIC).iterator(); + + int i = 0; + while (i < NUM_LINES) { + assertEquals(String.format(LINE_FORMAT, i++), new String(recordIterator.next().value())); + } + + // Verify that the same lines have been sourced again after the reset offsets request + while (i < 2 * NUM_LINES) { + assertEquals(String.format(LINE_FORMAT, i - NUM_LINES), new 
String(recordIterator.next().value())); + i++; + } + + // We expect exactly (2 * NUM_LINES) messages to be produced since the connector should reprocess exactly the same NUM_LINES messages after + // the offsets have been reset. We verify this by consuming all the messages from the topic after we've already ensured that at least + // (2 * NUM_LINES) messages can be consumed above. + assertEquals(2 * NUM_LINES, connect.kafka().consumeAll(TIMEOUT_MS, TOPIC).count()); + } + + /** + * Create a temporary file and append {@code numLines} to it + * + * @param numLines the number of lines to be appended to the created file + * @return the created file + */ + private File createTempFile(int numLines) throws Exception { + File sourceFile = TestUtils.tempFile(); + + try (PrintStream printStream = new PrintStream(Files.newOutputStream(sourceFile.toPath()))) { + for (int i = 0; i < numLines; i++) { + printStream.println(String.format(LINE_FORMAT, i)); + } + } + + return sourceFile; + } + + private Map baseConnectorConfigs(String filePath) { + Map connectorConfigs = new HashMap<>(); + connectorConfigs.put(CONNECTOR_CLASS_CONFIG, FileStreamSourceConnector.class.getName()); + connectorConfigs.put(TOPIC_CONFIG, TOPIC); + connectorConfigs.put(FILE_CONFIG, filePath); + return connectorConfigs; + } +} diff --git a/connect/file/src/test/resources/log4j.properties b/connect/file/src/test/resources/log4j.properties new file mode 100644 index 0000000000000..548e8c33cfbe9 --- /dev/null +++ b/connect/file/src/test/resources/log4j.properties @@ -0,0 +1,28 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## +log4j.rootLogger=INFO, stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +# +# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information +# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a +# specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. 
+# +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n + +log4j.logger.kafka=WARN diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncStore.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncStore.java index 63a91a11b453a..7ba3deaad2958 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncStore.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncStore.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.mirror; -import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.Producer; @@ -75,7 +74,7 @@ class OffsetSyncStore implements AutoCloseable { try { consumer = MirrorUtils.newConsumer(config.offsetSyncsTopicConsumerConfig()); admin = new TopicAdmin( - config.offsetSyncsTopicAdminConfig().get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG), + config.offsetSyncsTopicAdminConfig(), config.forwardingAdmin(config.offsetSyncsTopicAdminConfig())); store = createBackingStore(config, consumer, admin); } catch (Throwable t) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java index f8c30dc13b382..3db23d9e90920 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java @@ -286,20 +286,30 @@ public static NewTopicBuilder defineTopic(String topicName) { * @param adminConfig the configuration for the {@link Admin} */ public TopicAdmin(Map adminConfig) { - this(adminConfig.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG), Admin.create(adminConfig)); + this(adminConfig, Admin.create(adminConfig)); } - public TopicAdmin(Object bootstrapServers, Admin adminClient) { - this(bootstrapServers, adminClient, true); + public TopicAdmin(Map adminConfig, Admin adminClient) { + this(bootstrapServers(adminConfig), adminClient, true); } // visible for testing - TopicAdmin(Object bootstrapServers, Admin adminClient, boolean logCreation) { + TopicAdmin(Admin adminClient) { + this(null, adminClient, true); + } + + // visible for testing + TopicAdmin(String bootstrapServers, Admin adminClient, boolean logCreation) { this.admin = adminClient; - this.bootstrapServers = bootstrapServers != null ? bootstrapServers.toString() : ""; + this.bootstrapServers = bootstrapServers != null ? bootstrapServers : ""; this.logCreation = logCreation; } + private static String bootstrapServers(Map adminConfig) { + Object result = adminConfig.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG); + return result != null ? result.toString() : null; + } + /** * Attempt to create the topic described by the given definition, returning true if the topic was created or false * if the topic already existed. 
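[Note, not part of the patch] With the constructor change above, callers hand TopicAdmin the complete admin config map, and the bootstrap.servers value used in log and error messages is derived internally. A minimal sketch, with placeholder config values:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.connect.util.TopicAdmin;

public class TopicAdminConstructionSketch {
    public static TopicAdmin sampleTopicAdmin() {
        Map<String, Object> adminConfig = new HashMap<>();
        adminConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        adminConfig.put(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0");               // placeholder
        // The Admin client is created from this same map, and bootstrap.servers is extracted
        // internally for use in TopicAdmin's error messages.
        return new TopicAdmin(adminConfig);
    }
}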
@@ -720,23 +730,23 @@ public Map endOffsets(Set partitions) { String topic = partition.topic(); if (cause instanceof AuthorizationException) { String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); - throw new ConnectException(msg, e); + throw new ConnectException(msg, cause); } else if (cause instanceof UnsupportedVersionException) { // Should theoretically never happen, because this method is the same as what the consumer uses and therefore // should exist in the broker since before the admin client was added String msg = String.format("API to get the get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers); - throw new UnsupportedVersionException(msg, e); + throw new UnsupportedVersionException(msg, cause); } else if (cause instanceof TimeoutException) { String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); - throw new TimeoutException(msg, e); + throw new TimeoutException(msg, cause); } else if (cause instanceof LeaderNotAvailableException) { String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers); - throw new LeaderNotAvailableException(msg, e); + throw new LeaderNotAvailableException(msg, cause); } else if (cause instanceof org.apache.kafka.common.errors.RetriableException) { throw (org.apache.kafka.common.errors.RetriableException) cause; } else { String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers); - throw new ConnectException(msg, e); + throw new ConnectException(msg, cause); } } catch (InterruptedException e) { Thread.interrupted(); @@ -774,7 +784,7 @@ public Map retryEndOffsets(Set partitions, // Older brokers don't support this admin method, so rethrow it without wrapping it throw e; } catch (Exception e) { - throw new ConnectException("Failed to list offsets for topic partitions.", e); + throw ConnectUtils.maybeWrap(e, "Failed to list offsets for topic partitions"); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java index 21cd734065365..26b2d7cba165e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java @@ -809,13 +809,7 @@ public void testSeparateOffsetsTopic() throws Exception { ); // also consume from the cluster's global offsets topic - offsetRecords = connect.kafka() - .consumeAll( - TimeUnit.MINUTES.toMillis(1), - null, - null, - globalOffsetsTopic - ); + offsetRecords = connect.kafka().consumeAll(TimeUnit.MINUTES.toMillis(1), globalOffsetsTopic); seqnos = parseAndAssertOffsetsForSingleTask(offsetRecords); seqnos.forEach(seqno -> assertEquals("Offset commits should occur on connector-defined poll boundaries, which happen every " + MINIMUM_MESSAGES + " records", diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java index 8f5df8f66cc04..9f78977967252 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java +++ 
b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java @@ -91,7 +91,7 @@ public void returnEmptyWithApiVersionMismatchOnCreate() { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(createTopicResponseWithUnsupportedVersion(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); assertTrue(admin.createOrFindTopics(newTopic).isEmpty()); } } @@ -108,7 +108,7 @@ public void throwsWithApiVersionMismatchOnDescribe() { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(describeTopicResponseWithUnsupportedVersion(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Exception e = assertThrows(ConnectException.class, () -> admin.describeTopics(newTopic.name())); assertTrue(e.getCause() instanceof UnsupportedVersionException); } @@ -120,7 +120,7 @@ public void returnEmptyWithClusterAuthorizationFailureOnCreate() { Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(createTopicResponseWithClusterAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); assertFalse(admin.createTopic(newTopic)); env.kafkaClient().prepareResponse(createTopicResponseWithClusterAuthorizationException(newTopic)); @@ -134,7 +134,7 @@ public void throwsWithClusterAuthorizationFailureOnDescribe() { Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeTopicResponseWithClusterAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Exception e = assertThrows(ConnectException.class, () -> admin.describeTopics(newTopic.name())); assertTrue(e.getCause() instanceof ClusterAuthorizationException); } @@ -146,7 +146,7 @@ public void returnEmptyWithTopicAuthorizationFailureOnCreate() { Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(createTopicResponseWithTopicAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); assertFalse(admin.createTopic(newTopic)); env.kafkaClient().prepareResponse(createTopicResponseWithTopicAuthorizationException(newTopic)); @@ -160,7 +160,7 @@ public void throwsWithTopicAuthorizationFailureOnDescribe() { Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeTopicResponseWithTopicAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Exception e = assertThrows(ConnectException.class, () -> admin.describeTopics(newTopic.name())); assertTrue(e.getCause() instanceof TopicAuthorizationException); } @@ -173,7 +173,7 @@ public void 
shouldNotCreateTopicWhenItAlreadyExists() { try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, "myTopic", Collections.singletonList(topicPartitionInfo), null); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); assertFalse(admin.createTopic(newTopic)); assertTrue(admin.createTopics(newTopic).isEmpty()); assertTrue(admin.createOrFindTopic(newTopic)); @@ -252,7 +252,7 @@ public void shouldCreateOneTopicWhenProvidedMultipleDefinitionsWithSameTopicName NewTopic newTopic1 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); NewTopic newTopic2 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); Cluster cluster = createCluster(1); - try (TopicAdmin admin = new TopicAdmin(null, new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { + try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { Set newTopicNames = admin.createTopics(newTopic1, newTopic2); assertEquals(1, newTopicNames.size()); assertEquals(newTopic2.name(), newTopicNames.iterator().next()); @@ -264,7 +264,7 @@ public void shouldRetryCreateTopicWhenAvailableBrokersAreNotEnoughForReplication Cluster cluster = createCluster(1); NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).replicationFactor((short) 2).compacted().build(); - try (TopicAdmin admin = Mockito.spy(new TopicAdmin(null, new MockAdminClient(cluster.nodes(), cluster.nodeById(0))))) { + try (TopicAdmin admin = Mockito.spy(new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0))))) { try { admin.createTopicsWithRetry(newTopic, 2, 1, new MockTime()); } catch (Exception e) { @@ -281,7 +281,7 @@ public void shouldRetryWhenTopicCreateThrowsWrappedTimeoutException() { NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).replicationFactor((short) 1).compacted().build(); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0)); - TopicAdmin admin = Mockito.spy(new TopicAdmin(null, mockAdminClient))) { + TopicAdmin admin = Mockito.spy(new TopicAdmin(mockAdminClient))) { mockAdminClient.timeoutNextRequest(1); try { admin.createTopicsWithRetry(newTopic, 2, 1, new MockTime()); @@ -296,7 +296,7 @@ public void shouldRetryWhenTopicCreateThrowsWrappedTimeoutException() { @Test public void createShouldReturnFalseWhenSuppliedNullTopicDescription() { Cluster cluster = createCluster(1); - try (TopicAdmin admin = new TopicAdmin(null, new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { + try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { boolean created = admin.createTopic(null); assertFalse(created); } @@ -306,7 +306,7 @@ public void createShouldReturnFalseWhenSuppliedNullTopicDescription() { public void describeShouldReturnEmptyWhenTopicDoesNotExist() { NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); Cluster cluster = createCluster(1); - try (TopicAdmin admin = new TopicAdmin(null, new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { + try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { assertTrue(admin.describeTopics(newTopic.name()).isEmpty()); } } @@ -319,7 +319,7 @@ public void 
describeShouldReturnTopicDescriptionWhenTopicExists() { try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); Map desc = admin.describeTopics(newTopic.name()); assertFalse(desc.isEmpty()); TopicDescription topicDesc = new TopicDescription(topicName, false, Collections.singletonList(topicPartitionInfo)); @@ -333,7 +333,7 @@ public void describeTopicConfigShouldReturnEmptyMapWhenNoTopicsAreSpecified() { Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map results = admin.describeTopicConfigs(); assertTrue(results.isEmpty()); } @@ -345,7 +345,7 @@ public void describeTopicConfigShouldReturnEmptyMapWhenUnsupportedVersionFailure Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map results = admin.describeTopicConfigs(newTopic.name()); assertTrue(results.isEmpty()); } @@ -357,7 +357,7 @@ public void describeTopicConfigShouldReturnEmptyMapWhenClusterAuthorizationFailu Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithClusterAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map results = admin.describeTopicConfigs(newTopic.name()); assertTrue(results.isEmpty()); } @@ -369,7 +369,7 @@ public void describeTopicConfigShouldReturnEmptyMapWhenTopicAuthorizationFailure Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithTopicAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map results = admin.describeTopicConfigs(newTopic.name()); assertTrue(results.isEmpty()); } @@ -379,7 +379,7 @@ public void describeTopicConfigShouldReturnEmptyMapWhenTopicAuthorizationFailure public void describeTopicConfigShouldReturnMapWithNullValueWhenTopicDoesNotExist() { NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); Cluster cluster = createCluster(1); - try (TopicAdmin admin = new TopicAdmin(null, new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { + try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { Map results = admin.describeTopicConfigs(newTopic.name()); assertFalse(results.isEmpty()); assertEquals(1, results.size()); @@ -399,7 +399,7 @@ public void describeTopicConfigShouldReturnTopicConfigWhenTopicExists() { try 
(MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); Map result = admin.describeTopicConfigs(newTopic.name()); assertFalse(result.isEmpty()); assertEquals(1, result.size()); @@ -415,7 +415,7 @@ public void verifyingTopicCleanupPolicyShouldReturnFalseWhenBrokerVersionIsUnsup Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertFalse(result); } @@ -427,7 +427,7 @@ public void verifyingTopicCleanupPolicyShouldReturnFalseWhenClusterAuthorization Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithClusterAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertFalse(result); } @@ -439,7 +439,7 @@ public void verifyingTopicCleanupPolicyShouldReturnFalseWhenTopicAuthorizationEr Cluster cluster = createCluster(1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().prepareResponse(describeConfigsResponseWithTopicAuthorizationException(newTopic)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertFalse(result); } @@ -453,7 +453,7 @@ public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertTrue(result); } @@ -467,7 +467,7 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() { try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> 
admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); } @@ -481,7 +481,7 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPol try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); } @@ -495,7 +495,7 @@ public void verifyingGettingTopicCleanupPolicies() { try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); - TopicAdmin admin = new TopicAdmin(null, mockAdminClient); + TopicAdmin admin = new TopicAdmin(mockAdminClient); Set policies = admin.topicCleanupPolicy("myTopic"); assertEquals(1, policies.size()); assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, policies.iterator().next()); @@ -519,7 +519,7 @@ public void retryEndOffsetsShouldRethrowUnknownVersionException() { env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); // Expect the admin client list offsets will throw unsupported version, simulating older brokers env.kafkaClient().prepareResponse(listOffsetsResultWithUnsupportedVersion(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); // The retryEndOffsets should catch and rethrow an unsupported version exception assertThrows(UnsupportedVersionException.class, () -> admin.retryEndOffsets(tps, Duration.ofMillis(100), 1)); } @@ -533,18 +533,29 @@ public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException( Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(10), cluster)) { + try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { Map offsetMap = new HashMap<>(); offsetMap.put(tp1, offset); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.NONE)); - Map adminConfig = new HashMap<>(); - adminConfig.put(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0"); - TopicAdmin admin = new TopicAdmin(adminConfig, env.adminClient()); - assertThrows(ConnectException.class, () -> { - admin.retryEndOffsets(tps, Duration.ofMillis(100), 1); - }); + // This error should be treated as non-retriable and cause TopicAdmin::retryEndOffsets to fail + env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.TOPIC_AUTHORIZATION_FAILED, Errors.NONE)); + // But, in case there's a bug in our logic, prepare a valid response afterward so that TopicAdmin::retryEndOffsets + // will return successfully 
if we retry (which should in turn cause this test to fail) + env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); + env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset)); + + TopicAdmin admin = new TopicAdmin(env.adminClient()); + ConnectException exception = assertThrows(ConnectException.class, () -> + admin.retryEndOffsets(tps, Duration.ofMillis(100), 1) + ); + + Throwable cause = exception.getCause(); + assertNotNull("cause of failure should be preserved", cause); + assertTrue( + "cause of failure should be accurately reported; expected topic authorization error, but was " + cause, + cause instanceof TopicAuthorizationException + ); } } @@ -556,7 +567,7 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(10), cluster)) { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { Map offsetMap = new HashMap<>(); offsetMap.put(tp1, offset); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); @@ -564,13 +575,9 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset)); - Map adminConfig = new HashMap<>(); - adminConfig.put(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0"); - TopicAdmin admin = new TopicAdmin(adminConfig, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map endoffsets = admin.retryEndOffsets(tps, Duration.ofMillis(100), 1); - assertNotNull(endoffsets); - assertTrue(endoffsets.containsKey(tp1)); - assertEquals(1000L, endoffsets.get(tp1).longValue()); + assertEquals(Collections.singletonMap(tp1, offset), endoffsets); } } @@ -585,7 +592,7 @@ public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResultWithClusterAuthorizationException(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps)); assertTrue(e.getMessage().contains("Not authorized to get the end offsets")); } @@ -602,7 +609,7 @@ public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErro env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResultWithUnsupportedVersion(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); UnsupportedVersionException e = assertThrows(UnsupportedVersionException.class, () -> admin.endOffsets(tps)); } } @@ -620,7 +627,7 @@ public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResultWithTimeout(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); TimeoutException e = 
assertThrows(TimeoutException.class, () -> admin.endOffsets(tps)); } } @@ -636,7 +643,7 @@ public void endOffsetsShouldFailWithNonRetriableWhenUnknownErrorOccurs() { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResultWithUnknownError(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps)); assertTrue(e.getMessage().contains("Error while getting end offsets for topic")); } @@ -647,7 +654,7 @@ public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() { String topicName = "myTopic"; Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map offsets = admin.endOffsets(Collections.emptySet()); assertTrue(offsets.isEmpty()); } @@ -664,7 +671,7 @@ public void endOffsetsShouldReturnOffsetsForOnePartition() { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map offsets = admin.endOffsets(tps); assertEquals(1, offsets.size()); assertEquals(Long.valueOf(offset), offsets.get(tp1)); @@ -684,7 +691,7 @@ public void endOffsetsShouldReturnOffsetsForMultiplePartitions() { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset1, tp2, offset2)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); Map offsets = admin.endOffsets(tps); assertEquals(2, offsets.size()); assertEquals(Long.valueOf(offset1), offsets.get(tp1)); @@ -703,7 +710,7 @@ public void endOffsetsShouldFailWhenAnyTopicPartitionHasError() { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE)); env.kafkaClient().prepareResponse(listOffsetsResultWithClusterAuthorizationException(tp1, null)); - TopicAdmin admin = new TopicAdmin(null, env.adminClient()); + TopicAdmin admin = new TopicAdmin(env.adminClient()); ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps)); assertTrue(e.getMessage().contains("Not authorized to get the end offsets")); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java index 4f0c4369f89ed..20dce332fcfa1 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java @@ -18,6 +18,8 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; 
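For context on the behaviour these TopicAdminTest changes pin down, here is a minimal caller-side sketch. It is illustrative only (everything except TopicAdmin, retryEndOffsets and the exception types is made up): retriable metadata errors such as an unknown topic are retried internally, an UnsupportedVersionException from older brokers is rethrown as-is, and non-retriable failures surface as a ConnectException whose cause (for example a TopicAuthorizationException) is preserved.

    import java.time.Duration;
    import java.util.Map;
    import java.util.Set;

    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.errors.TopicAuthorizationException;
    import org.apache.kafka.common.errors.UnsupportedVersionException;
    import org.apache.kafka.connect.errors.ConnectException;
    import org.apache.kafka.connect.util.TopicAdmin;

    public class EndOffsetsReaderSketch {
        // Hypothetical caller; only TopicAdmin, retryEndOffsets and the exception types come from the patch.
        static Map<TopicPartition, Long> readEndOffsets(TopicAdmin admin, Set<TopicPartition> partitions) {
            try {
                // Transient metadata errors (e.g. the topic is still being created) are retried
                // inside retryEndOffsets until the timeout expires.
                return admin.retryEndOffsets(partitions, Duration.ofMillis(100), 1);
            } catch (UnsupportedVersionException e) {
                // Older brokers: rethrown unchanged so the caller can fall back to another mechanism.
                throw e;
            } catch (ConnectException e) {
                // Non-retriable failures are wrapped, with the original cause preserved.
                if (e.getCause() instanceof TopicAuthorizationException) {
                    // report a permission problem distinctly instead of retrying forever
                }
                throw e;
            }
        }
    }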
import org.apache.kafka.common.utils.Exit; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.errors.ConnectException; @@ -25,10 +27,12 @@ import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo; +import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffset; import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets; import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; import org.apache.kafka.connect.runtime.rest.entities.ServerInfo; import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException; +import org.apache.kafka.connect.util.SinkUtils; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.client.api.ContentResponse; import org.eclipse.jetty.client.api.Request; @@ -669,11 +673,47 @@ public ConnectorOffsets connectorOffsets(String connectorName) { "Could not fetch connector offsets. Error response: " + responseToString(response)); } + /** + * Alter the offset for a source connector's partition via the PATCH /connectors/{connector}/offsets + * endpoint + * + * @param connectorName name of the source connector whose offset is to be altered + * @param partition the source partition for which the offset is to be altered + * @param offset the source offset to be written + * + * @return the API response as a {@link java.lang.String} + */ + public String alterSourceConnectorOffset(String connectorName, Map partition, Map offset) { + return alterConnectorOffsets( + connectorName, + new ConnectorOffsets(Collections.singletonList(new ConnectorOffset(partition, offset))) + ); + } + + /** + * Alter the offset for a sink connector's topic partition via the PATCH /connectors/{connector}/offsets + * endpoint + * + * @param connectorName name of the sink connector whose offset is to be altered + * @param topicPartition the topic partition for which the offset is to be altered + * @param offset the offset to be written + * + * @return the API response as a {@link java.lang.String} + */ + public String alterSinkConnectorOffset(String connectorName, TopicPartition topicPartition, Long offset) { + return alterConnectorOffsets( + connectorName, + SinkUtils.consumerGroupOffsetsToConnectorOffsets(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset))) + ); + } + /** * Alter a connector's offsets via the PATCH /connectors/{connector}/offsets endpoint * * @param connectorName name of the connector whose offsets are to be altered * @param offsets offsets to alter + * + * @return the API response as a {@link java.lang.String} */ public String alterConnectorOffsets(String connectorName, ConnectorOffsets offsets) { String url = endpointForResource(String.format("connectors/%s/offsets", connectorName)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java index df8a2253531f6..19b8090f69db0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java @@ -525,6 +525,19 @@ public ConsumerRecords consume(int n, long maxDuration, Map consumeAll( + long maxDurationMs, + String... 
topics + ) throws TimeoutException, InterruptedException, ExecutionException { + return consumeAll(maxDurationMs, null, null, topics); + } + /** * Consume all currently-available records for the specified topics in a given duration, or throw an exception. * @param maxDurationMs the max duration to wait for these records (in milliseconds). diff --git a/core/src/main/java/kafka/log/remote/RemoteLogManager.java b/core/src/main/java/kafka/log/remote/RemoteLogManager.java index f4b20014b49fb..4a35abf6a115f 100644 --- a/core/src/main/java/kafka/log/remote/RemoteLogManager.java +++ b/core/src/main/java/kafka/log/remote/RemoteLogManager.java @@ -584,8 +584,10 @@ boolean isLeader() { return leaderEpoch >= 0; } - // The copiedOffsetOption is OptionalLong.empty() initially for a new leader RLMTask, and needs to be fetched inside the task's run() method. + // The copied and log-start offset is empty initially for a new leader RLMTask, and needs to be fetched inside + // the task's run() method. private volatile OptionalLong copiedOffsetOption = OptionalLong.empty(); + private volatile boolean isLogStartOffsetUpdatedOnBecomingLeader = false; public void convertToLeader(int leaderEpochVal) { if (leaderEpochVal < 0) { @@ -594,22 +596,33 @@ public void convertToLeader(int leaderEpochVal) { if (this.leaderEpoch != leaderEpochVal) { leaderEpoch = leaderEpochVal; } - // Reset readOffset, so that it is set in next run of RLMTask + // Reset copied and log-start offset, so that it is set in next run of RLMTask copiedOffsetOption = OptionalLong.empty(); + isLogStartOffsetUpdatedOnBecomingLeader = false; } public void convertToFollower() { leaderEpoch = -1; } - private void maybeUpdateReadOffset(UnifiedLog log) throws RemoteStorageException { + private void maybeUpdateLogStartOffsetOnBecomingLeader(UnifiedLog log) throws RemoteStorageException { + if (!isLogStartOffsetUpdatedOnBecomingLeader) { + long logStartOffset = findLogStartOffset(topicIdPartition, log); + updateRemoteLogStartOffset.accept(topicIdPartition.topicPartition(), logStartOffset); + isLogStartOffsetUpdatedOnBecomingLeader = true; + logger.info("Found the logStartOffset: {} for partition: {} after becoming leader, leaderEpoch: {}", + logStartOffset, topicIdPartition, leaderEpoch); + } + } + + private void maybeUpdateCopiedOffset(UnifiedLog log) throws RemoteStorageException { if (!copiedOffsetOption.isPresent()) { // This is found by traversing from the latest leader epoch from leader epoch history and find the highest offset // of a segment with that epoch copied into remote storage. If it can not find an entry then it checks for the // previous leader epoch till it finds an entry, If there are no entries till the earliest leader epoch in leader // epoch cache then it starts copying the segments from the earliest epoch entry's offset. 
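As a side note on the two RLMTask fields introduced above, the pattern is: clear both on becoming leader, recompute lazily on the task's next run. A minimal Java sketch of that pattern follows; the class, field and method names are illustrative stand-ins, not the patch itself.

    import java.util.OptionalLong;
    import java.util.function.LongSupplier;

    // Illustrative stand-in for the RLMTask leadership-scoped state (not the patch itself).
    class LeaderScopedStateSketch {
        private volatile OptionalLong copiedOffset = OptionalLong.empty();
        private volatile boolean logStartOffsetPropagated = false;

        void convertToLeader() {
            // Reset both values so the next run recomputes them for the new leadership term.
            copiedOffset = OptionalLong.empty();
            logStartOffsetPropagated = false;
        }

        void run(LongSupplier findLogStartOffset, LongSupplier findHighestCopiedOffset) {
            if (!logStartOffsetPropagated) {
                // In the patch the value is handed to updateRemoteLogStartOffset before the flag is set.
                findLogStartOffset.getAsLong();
                logStartOffsetPropagated = true;
            }
            if (!copiedOffset.isPresent()) {
                copiedOffset = OptionalLong.of(findHighestCopiedOffset.getAsLong());
            }
        }
    }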
copiedOffsetOption = OptionalLong.of(findHighestRemoteOffset(topicIdPartition, log)); - logger.info("Found the highest copied remote offset: {} for partition: {} after becoming leader, " + + logger.info("Found the highest copiedRemoteOffset: {} for partition: {} after becoming leader, " + "leaderEpoch: {}", copiedOffsetOption, topicIdPartition, leaderEpoch); } } @@ -645,7 +658,8 @@ public void copyLogSegmentsToRemote(UnifiedLog log) throws InterruptedException return; try { - maybeUpdateReadOffset(log); + maybeUpdateLogStartOffsetOnBecomingLeader(log); + maybeUpdateCopiedOffset(log); long copiedOffset = copiedOffsetOption.getAsLong(); // LSO indicates the offset below are ready to be consumed (high-watermark or committed) @@ -823,10 +837,10 @@ private boolean deleteRetentionSizeBreachedSegments(RemoteLogSegmentMetadata met return false; } - boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, x -> { + boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, ignored -> { // Assumption that segments contain size >= 0 if (remainingBreachedSize > 0) { - long remainingBytes = remainingBreachedSize - x.segmentSizeInBytes(); + long remainingBytes = remainingBreachedSize - metadata.segmentSizeInBytes(); if (remainingBytes >= 0) { remainingBreachedSize = remainingBytes; return true; @@ -850,7 +864,7 @@ public boolean deleteRetentionTimeBreachedSegments(RemoteLogSegmentMetadata meta } boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, - x -> x.maxTimestampMs() <= retentionTimeData.get().cleanupUntilMs); + ignored -> metadata.maxTimestampMs() <= retentionTimeData.get().cleanupUntilMs); if (isSegmentDeleted) { remainingBreachedSize = Math.max(0, remainingBreachedSize - metadata.segmentSizeInBytes()); // It is fine to have logStartOffset as `metadata.endOffset() + 1` as the segment offset intervals @@ -862,27 +876,40 @@ public boolean deleteRetentionTimeBreachedSegments(RemoteLogSegmentMetadata meta return isSegmentDeleted; } - private boolean deleteLogStartOffsetBreachedSegments(RemoteLogSegmentMetadata metadata, long startOffset) + private boolean deleteLogStartOffsetBreachedSegments(RemoteLogSegmentMetadata metadata, + long logStartOffset, + NavigableMap leaderEpochEntries) throws RemoteStorageException, ExecutionException, InterruptedException { - boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, x -> startOffset > x.endOffset()); - if (isSegmentDeleted && retentionSizeData.isPresent()) { - remainingBreachedSize = Math.max(0, remainingBreachedSize - metadata.segmentSizeInBytes()); - logger.info("Deleted remote log segment {} due to log start offset {} breach", metadata.remoteLogSegmentId(), startOffset); + boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, ignored -> { + if (!leaderEpochEntries.isEmpty()) { + // Note that `logStartOffset` and `leaderEpochEntries.firstEntry().getValue()` should be same + Integer firstEpoch = leaderEpochEntries.firstKey(); + return metadata.segmentLeaderEpochs().keySet().stream().allMatch(epoch -> epoch <= firstEpoch) + && metadata.endOffset() < logStartOffset; + } + return false; + }); + if (isSegmentDeleted) { + logger.info("Deleted remote log segment {} due to log-start-offset {} breach. " + + "Current earliest-epoch-entry: {}, segment-end-offset: {} and segment-epochs: {}", + metadata.remoteLogSegmentId(), logStartOffset, leaderEpochEntries.firstEntry(), + metadata.endOffset(), metadata.segmentLeaderEpochs()); } - return isSegmentDeleted; } // It removes the segments beyond the current leader's earliest epoch. 
Those segments are considered as // unreferenced because they are not part of the current leader epoch lineage. - private boolean deleteLogSegmentsDueToLeaderEpochCacheTruncation(EpochEntry earliestEpochEntry, RemoteLogSegmentMetadata metadata) throws RemoteStorageException, ExecutionException, InterruptedException { - boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, x -> - x.segmentLeaderEpochs().keySet().stream().allMatch(epoch -> epoch < earliestEpochEntry.epoch)); + private boolean deleteLogSegmentsDueToLeaderEpochCacheTruncation(EpochEntry earliestEpochEntry, + RemoteLogSegmentMetadata metadata) + throws RemoteStorageException, ExecutionException, InterruptedException { + boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, ignored -> + metadata.segmentLeaderEpochs().keySet().stream().allMatch(epoch -> epoch < earliestEpochEntry.epoch)); if (isSegmentDeleted) { - logger.info("Deleted remote log segment {} due to leader epoch cache truncation. Current earliest epoch: {}, segmentEndOffset: {} and segmentEpochs: {}", + logger.info("Deleted remote log segment {} due to leader-epoch-cache truncation. " + + "Current earliest-epoch-entry: {}, segment-end-offset: {} and segment-epochs: {}", metadata.remoteLogSegmentId(), earliestEpochEntry, metadata.endOffset(), metadata.segmentLeaderEpochs().keySet()); } - // No need to update the log-start-offset as these epochs/offsets are earlier to that value. return isSegmentDeleted; } @@ -890,7 +917,7 @@ private boolean deleteLogSegmentsDueToLeaderEpochCacheTruncation(EpochEntry earl private boolean deleteRemoteLogSegment(RemoteLogSegmentMetadata segmentMetadata, Predicate predicate) throws RemoteStorageException, ExecutionException, InterruptedException { if (predicate.test(segmentMetadata)) { - logger.info("Deleting remote log segment {}", segmentMetadata.remoteLogSegmentId()); + logger.debug("Deleting remote log segment {}", segmentMetadata.remoteLogSegmentId()); // Publish delete segment started event. remoteLogMetadataManager.updateRemoteLogSegmentMetadata( new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), @@ -903,10 +930,9 @@ private boolean deleteRemoteLogSegment(RemoteLogSegmentMetadata segmentMetadata, remoteLogMetadataManager.updateRemoteLogSegmentMetadata( new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), segmentMetadata.customMetadata(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, brokerId)).get(); - logger.info("Deleted remote log segment {}", segmentMetadata.remoteLogSegmentId()); + logger.debug("Deleted remote log segment {}", segmentMetadata.remoteLogSegmentId()); return true; } - return false; } @@ -953,7 +979,6 @@ private void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, Ex LeaderEpochFileCache leaderEpochCache = leaderEpochCacheOption.get(); // Build the leader epoch map by filtering the epochs that do not have any records. 
NavigableMap epochWithOffsets = buildFilteredLeaderEpochMap(leaderEpochCache.epochWithOffsets()); - Optional earliestEpochEntryOptional = leaderEpochCache.earliestEntry(); long logStartOffset = log.logStartOffset(); long logEndOffset = log.logEndOffset(); @@ -963,24 +988,35 @@ private void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, Ex RemoteLogRetentionHandler remoteLogRetentionHandler = new RemoteLogRetentionHandler(retentionSizeData, retentionTimeData); Iterator epochIterator = epochWithOffsets.navigableKeySet().iterator(); - boolean isSegmentDeleted = true; - while (isSegmentDeleted && epochIterator.hasNext()) { + boolean canProcess = true; + while (canProcess && epochIterator.hasNext()) { Integer epoch = epochIterator.next(); Iterator segmentsIterator = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); - while (isSegmentDeleted && segmentsIterator.hasNext()) { + while (canProcess && segmentsIterator.hasNext()) { if (isCancelled() || !isLeader()) { logger.info("Returning from remote log segments cleanup for the remaining segments as the task state is changed."); return; } RemoteLogSegmentMetadata metadata = segmentsIterator.next(); - // check whether the segment contains the required epoch range with in the current leader epoch lineage. - if (isRemoteSegmentWithinLeaderEpochs(metadata, logEndOffset, epochWithOffsets)) { - isSegmentDeleted = - remoteLogRetentionHandler.deleteRetentionTimeBreachedSegments(metadata) || - remoteLogRetentionHandler.deleteRetentionSizeBreachedSegments(metadata) || - remoteLogRetentionHandler.deleteLogStartOffsetBreachedSegments(metadata, logStartOffset); + // When the log-start-offset is moved by the user, the leader-epoch-checkpoint file gets truncated + // as per the log-start-offset. Until the rlm-cleaner-thread runs in the next iteration, those + // remote log segments won't be removed. The `isRemoteSegmentWithinLeaderEpochs` check validates whether + // the epochs present in the segment lie in the checkpoint file; for such segments it will always return false + // since the checkpoint file has already been truncated. + boolean isSegmentDeleted = remoteLogRetentionHandler.deleteLogStartOffsetBreachedSegments( + metadata, logStartOffset, epochWithOffsets); + boolean isValidSegment = false; + if (!isSegmentDeleted) { + // check whether the segment contains the required epoch range within the current leader epoch lineage. + isValidSegment = isRemoteSegmentWithinLeaderEpochs(metadata, logEndOffset, epochWithOffsets); + if (isValidSegment) { + isSegmentDeleted = + remoteLogRetentionHandler.deleteRetentionTimeBreachedSegments(metadata) || + remoteLogRetentionHandler.deleteRetentionSizeBreachedSegments(metadata); + } } + canProcess = isSegmentDeleted || !isValidSegment; } } @@ -988,9 +1024,12 @@ private void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, Ex // to the leader. This will remove the unreferenced segments in the remote storage. This is needed for // unclean leader election scenarios as the remote storage can have epochs earlier to the current leader's // earliest leader epoch.
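The deletion order in the loop above can be restated compactly. The sketch below is an illustrative Java restatement with made-up names, not the patch: a segment deleted for a log-start-offset breach, or a segment outside the leader epoch lineage, keeps the iteration going, and only segments within the lineage are tested against retention time and size.

    // Illustrative restatement of the per-segment decision in the cleanup loop above.
    final class SegmentCleanupDecisionSketch {
        final boolean deleted;       // the segment was removed from remote storage
        final boolean keepIterating; // corresponds to `canProcess` in the patch

        SegmentCleanupDecisionSketch(boolean deletedForStartOffsetBreach,
                                     boolean withinLeaderEpochLineage,
                                     boolean retentionTimeBreached,
                                     boolean retentionSizeBreached) {
            boolean deletedForRetention = !deletedForStartOffsetBreach
                    && withinLeaderEpochLineage
                    && (retentionTimeBreached || retentionSizeBreached);
            this.deleted = deletedForStartOffsetBreach || deletedForRetention;
            // Iteration stops only when a segment within the lineage was examined and nothing
            // was deleted; later segments are then assumed not to be breached either.
            this.keepIterating = this.deleted || !withinLeaderEpochLineage;
        }
    }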
+ Optional earliestEpochEntryOptional = leaderEpochCache.earliestEntry(); if (earliestEpochEntryOptional.isPresent()) { EpochEntry earliestEpochEntry = earliestEpochEntryOptional.get(); - Iterator epochsToClean = remoteLeaderEpochs.stream().filter(x -> x < earliestEpochEntry.epoch).iterator(); + Iterator epochsToClean = remoteLeaderEpochs.stream() + .filter(remoteEpoch -> remoteEpoch < earliestEpochEntry.epoch) + .iterator(); while (epochsToClean.hasNext()) { int epoch = epochsToClean.next(); Iterator segmentsToBeCleaned = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); @@ -1079,8 +1118,9 @@ public static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata Integer segmentFirstEpoch = segmentLeaderEpochs.firstKey(); Integer segmentLastEpoch = segmentLeaderEpochs.lastKey(); if (segmentFirstEpoch < leaderEpochs.firstKey() || segmentLastEpoch > leaderEpochs.lastKey()) { - LOGGER.debug("[{}] Remote segment {} is not within the partition leader epoch lineage. Remote segment epochs: {} and partition leader epochs: {}", - segmentMetadata.topicIdPartition(), segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs); + LOGGER.debug("Segment {} is not within the partition leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs); return false; } @@ -1090,15 +1130,16 @@ public static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata // If segment's epoch does not exist in the leader epoch lineage then it is not a valid segment. if (!leaderEpochs.containsKey(epoch)) { - LOGGER.debug("[{}] Remote segment {}'s epoch {} is not within the leader epoch lineage. Remote segment epochs: {} and partition leader epochs: {}", - segmentMetadata.topicIdPartition(), segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); + LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); return false; } // Segment's first epoch's offset should be more than or equal to the respective leader epoch's offset. 
if (epoch == segmentFirstEpoch && offset < leaderEpochs.get(epoch)) { - LOGGER.debug("[{}] Remote segment {}'s first epoch {}'s offset is less than leader epoch's offset {}.", - segmentMetadata.topicIdPartition(), segmentMetadata.remoteLogSegmentId(), epoch, leaderEpochs.get(epoch)); + LOGGER.debug("Segment {} first epoch {} offset is less than leader epoch offset {}.", + segmentMetadata.remoteLogSegmentId(), epoch, leaderEpochs.get(epoch)); return false; } @@ -1106,8 +1147,8 @@ public static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata if (epoch == segmentLastEpoch) { Map.Entry nextEntry = leaderEpochs.higherEntry(epoch); if (nextEntry != null && segmentEndOffset > nextEntry.getValue() - 1) { - LOGGER.debug("[{}] Remote segment {}'s end offset {} is more than leader epoch's offset {}.", - segmentMetadata.topicIdPartition(), segmentMetadata.remoteLogSegmentId(), segmentEndOffset, nextEntry.getValue() - 1); + LOGGER.debug("Segment {} end offset {} is more than leader epoch offset {}.", + segmentMetadata.remoteLogSegmentId(), segmentEndOffset, nextEntry.getValue() - 1); return false; } } @@ -1115,13 +1156,12 @@ public static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata // Next segment epoch entry and next leader epoch entry should be same to ensure that the segment's epoch // is within the leader epoch lineage. if (epoch != segmentLastEpoch && !leaderEpochs.higherEntry(epoch).equals(segmentLeaderEpochs.higherEntry(epoch))) { - LOGGER.debug("[{}] Remote segment {}'s epoch {} is not within the leader epoch lineage. Remote segment epochs: {} and partition leader epochs: {}", - segmentMetadata.topicIdPartition(), segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); + LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); return false; } - } - // segment end offset should be with in the log end offset. 
return segmentEndOffset < logEndOffset; } @@ -1272,7 +1312,7 @@ private FetchDataInfo addAbortedTransactions(long startOffset, OffsetIndex offsetIndex = indexCache.getIndexEntry(segmentMetadata).offsetIndex(); long upperBoundOffset = offsetIndex.fetchUpperBoundOffset(startOffsetPosition, fetchSize) - .map(x -> x.offset).orElse(segmentMetadata.endOffset() + 1); + .map(position -> position.offset).orElse(segmentMetadata.endOffset() + 1); final Set abortedTransactions = new HashSet<>(); @@ -1369,6 +1409,26 @@ long findHighestRemoteOffset(TopicIdPartition topicIdPartition, UnifiedLog log) return offset.orElse(-1L); } + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { + Optional logStartOffset = Optional.empty(); + Option maybeLeaderEpochFileCache = log.leaderEpochCache(); + if (maybeLeaderEpochFileCache.isDefined()) { + LeaderEpochFileCache cache = maybeLeaderEpochFileCache.get(); + OptionalInt earliestEpochOpt = cache.earliestEntry() + .map(epochEntry -> OptionalInt.of(epochEntry.epoch)) + .orElseGet(OptionalInt::empty); + while (!logStartOffset.isPresent() && earliestEpochOpt.isPresent()) { + Iterator iterator = + remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, earliestEpochOpt.getAsInt()); + if (iterator.hasNext()) { + logStartOffset = Optional.of(iterator.next().startOffset()); + } + earliestEpochOpt = cache.nextEpoch(earliestEpochOpt.getAsInt()); + } + } + return logStartOffset.orElseGet(log::localLogStartOffset); + } + /** * Submit a remote log read task. * This method returns immediately. The read operation is executed in a thread pool. diff --git a/core/src/main/scala/kafka/network/RequestConvertToJson.scala b/core/src/main/scala/kafka/network/RequestConvertToJson.scala index 771a86e8f0fc0..889b76643bae0 100644 --- a/core/src/main/scala/kafka/network/RequestConvertToJson.scala +++ b/core/src/main/scala/kafka/network/RequestConvertToJson.scala @@ -96,6 +96,8 @@ object RequestConvertToJson { case req: DescribeTransactionsRequest => DescribeTransactionsRequestDataJsonConverter.write(req.data, request.version) case req: ListTransactionsRequest => ListTransactionsRequestDataJsonConverter.write(req.data, request.version) case req: ConsumerGroupHeartbeatRequest => ConsumerGroupHeartbeatRequestDataJsonConverter.write(req.data, request.version) + case req: ConsumerGroupDescribeRequest => ConsumerGroupDescribeRequestDataJsonConverter.write(req.data, request.version) + case req: ControllerRegistrationRequest => ControllerRegistrationRequestDataJsonConverter.write(req.data, request.version) case _ => throw new IllegalStateException(s"ApiKey ${request.apiKey} is not currently handled in `request`, the " + "code should be updated to do so."); } @@ -172,6 +174,8 @@ object RequestConvertToJson { case res: DescribeTransactionsResponse => DescribeTransactionsResponseDataJsonConverter.write(res.data, version) case res: ListTransactionsResponse => ListTransactionsResponseDataJsonConverter.write(res.data, version) case res: ConsumerGroupHeartbeatResponse => ConsumerGroupHeartbeatResponseDataJsonConverter.write(res.data, version) + case res: ConsumerGroupDescribeResponse => ConsumerGroupDescribeResponseDataJsonConverter.write(res.data, version) + case req: ControllerRegistrationResponse => ControllerRegistrationResponseDataJsonConverter.write(req.data, version) case _ => throw new IllegalStateException(s"ApiKey ${response.apiKey} is not currently handled in `response`, the " + "code should be updated to do so."); } diff --git 
a/core/src/main/scala/kafka/server/AuthHelper.scala b/core/src/main/scala/kafka/server/AuthHelper.scala index 50a13510ac03a..5e6a86a75aacd 100644 --- a/core/src/main/scala/kafka/server/AuthHelper.scala +++ b/core/src/main/scala/kafka/server/AuthHelper.scala @@ -22,9 +22,14 @@ import java.util.Collections import kafka.network.RequestChannel import kafka.security.authorizer.AclEntry import kafka.utils.CoreUtils +import org.apache.kafka.clients.admin.EndpointType import org.apache.kafka.common.acl.AclOperation +import org.apache.kafka.common.acl.AclOperation.DESCRIBE import org.apache.kafka.common.errors.ClusterAuthorizationException -import org.apache.kafka.common.requests.RequestContext +import org.apache.kafka.common.message.DescribeClusterResponseData +import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.{DescribeClusterRequest, RequestContext} import org.apache.kafka.common.resource.Resource.CLUSTER_NAME import org.apache.kafka.common.resource.ResourceType.CLUSTER import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourceType} @@ -35,7 +40,6 @@ import scala.collection.{Map, Seq} import scala.jdk.CollectionConverters._ class AuthHelper(authorizer: Option[Authorizer]) { - def authorize(requestContext: RequestContext, operation: AclOperation, resourceType: ResourceType, @@ -130,4 +134,57 @@ class AuthHelper(authorizer: Option[Authorizer]) { } } + def computeDescribeClusterResponse( + request: RequestChannel.Request, + expectedEndpointType: EndpointType, + clusterId: String, + getNodes: () => DescribeClusterBrokerCollection, + getControllerId: () => Int + ): DescribeClusterResponseData = { + val describeClusterRequest = request.body[DescribeClusterRequest] + val requestEndpointType = EndpointType.fromId(describeClusterRequest.data().endpointType()) + if (requestEndpointType.equals(EndpointType.UNKNOWN)) { + return new DescribeClusterResponseData(). + setErrorCode(if (request.header.data().requestApiVersion() == 0) { + Errors.INVALID_REQUEST.code() + } else { + Errors.UNSUPPORTED_ENDPOINT_TYPE.code() + }). + setErrorMessage("Unsupported endpoint type " + describeClusterRequest.data().endpointType().toInt) + } else if (!expectedEndpointType.equals(requestEndpointType)) { + return new DescribeClusterResponseData(). + setErrorCode(if (request.header.data().requestApiVersion() == 0) { + Errors.INVALID_REQUEST.code() + } else { + Errors.MISMATCHED_ENDPOINT_TYPE.code() + }). + setErrorMessage("The request was sent to an endpoint of type " + expectedEndpointType + + ", but we wanted an endpoint of type " + requestEndpointType) + } + var clusterAuthorizedOperations = Int.MinValue // Default value in the schema + // get cluster authorized operations + if (describeClusterRequest.data.includeClusterAuthorizedOperations) { + if (authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME)) + clusterAuthorizedOperations = authorizedOperations(request, Resource.CLUSTER) + else + clusterAuthorizedOperations = 0 + } + // Get the node list and the controller ID. + val nodes = getNodes() + val controllerId = getControllerId() + // If the provided controller ID is not in the node list, return -1 instead + // to avoid confusing the client. This could happen in a case where we know + // the controller ID, but we don't yet have KIP-919 information about that + // controller. 
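The endpoint-type error handling above reduces to a small decision table. The Java sketch below restates it with illustrative class and method names (the EndpointType and Errors values are the ones used in the patch): version 0 clients, which predate endpoint types, always get INVALID_REQUEST, while newer clients get the more specific KIP-919 codes.

    import org.apache.kafka.clients.admin.EndpointType;
    import org.apache.kafka.common.protocol.Errors;

    // Illustrative restatement of the error-code selection in computeDescribeClusterResponse.
    final class DescribeClusterErrorSketch {
        static Errors errorFor(EndpointType expected, EndpointType requested, short requestApiVersion) {
            if (requested == EndpointType.UNKNOWN) {
                return requestApiVersion == 0 ? Errors.INVALID_REQUEST : Errors.UNSUPPORTED_ENDPOINT_TYPE;
            }
            if (requested != expected) {
                return requestApiVersion == 0 ? Errors.INVALID_REQUEST : Errors.MISMATCHED_ENDPOINT_TYPE;
            }
            return Errors.NONE;
        }
    }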
+ val effectiveControllerId = if (nodes.find(controllerId) == null) { + -1 + } else { + controllerId + } + new DescribeClusterResponseData(). + setClusterId(clusterId). + setControllerId(effectiveControllerId). + setClusterAuthorizedOperations(clusterAuthorizedOperations). + setBrokers(nodes) + } } diff --git a/core/src/main/scala/kafka/server/ControllerApis.scala b/core/src/main/scala/kafka/server/ControllerApis.scala index 9f9f775104db8..82c7b2b68b7da 100644 --- a/core/src/main/scala/kafka/server/ControllerApis.scala +++ b/core/src/main/scala/kafka/server/ControllerApis.scala @@ -27,7 +27,7 @@ import kafka.network.RequestChannel import kafka.raft.RaftManager import kafka.server.QuotaFactory.QuotaManagers import kafka.utils.Logging -import org.apache.kafka.clients.admin.AlterConfigOp +import org.apache.kafka.clients.admin.{AlterConfigOp, EndpointType} import org.apache.kafka.common.Uuid.ZERO_UUID import org.apache.kafka.common.acl.AclOperation.{ALTER, ALTER_CONFIGS, CLUSTER_ACTION, CREATE, CREATE_TOKENS, DELETE, DESCRIBE, DESCRIBE_CONFIGS} import org.apache.kafka.common.config.ConfigResource @@ -47,9 +47,10 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.resource.Resource.CLUSTER_NAME import org.apache.kafka.common.resource.ResourceType.{CLUSTER, TOPIC, USER} import org.apache.kafka.common.utils.Time -import org.apache.kafka.common.{Node, Uuid} +import org.apache.kafka.common.Uuid import org.apache.kafka.controller.ControllerRequestContext.requestTimeoutMsToDeadlineNs import org.apache.kafka.controller.{Controller, ControllerRequestContext} +import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.metadata.{BrokerHeartbeatReply, BrokerRegistrationReply} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.auth.SecurityProtocol @@ -62,16 +63,18 @@ import scala.jdk.CollectionConverters._ /** * Request handler for Controller APIs */ -class ControllerApis(val requestChannel: RequestChannel, - val authorizer: Option[Authorizer], - val quotas: QuotaManagers, - val time: Time, - val controller: Controller, - val raftManager: RaftManager[ApiMessageAndVersion], - val config: KafkaConfig, - val metaProperties: MetaProperties, - val controllerNodes: Seq[Node], - val apiVersionManager: ApiVersionManager) extends ApiRequestHandler with Logging { +class ControllerApis( + val requestChannel: RequestChannel, + val authorizer: Option[Authorizer], + val quotas: QuotaManagers, + val time: Time, + val controller: Controller, + val raftManager: RaftManager[ApiMessageAndVersion], + val config: KafkaConfig, + val metaProperties: MetaProperties, + val registrationsPublisher: ControllerRegistrationsPublisher, + val apiVersionManager: ApiVersionManager +) extends ApiRequestHandler with Logging { this.logIdent = s"[ControllerApis nodeId=${config.nodeId}] " val authHelper = new AuthHelper(authorizer) @@ -117,6 +120,8 @@ class ControllerApis(val requestChannel: RequestChannel, case ApiKeys.DELETE_ACLS => aclApis.handleDeleteAcls(request) case ApiKeys.ELECT_LEADERS => handleElectLeaders(request) case ApiKeys.UPDATE_FEATURES => handleUpdateFeatures(request) + case ApiKeys.DESCRIBE_CLUSTER => handleDescribeCluster(request) + case ApiKeys.CONTROLLER_REGISTRATION => handleControllerRegistration(request) case _ => throw new ApiException(s"Unsupported ApiKey ${request.context.header.apiKey}") } @@ -824,6 +829,20 @@ class ControllerApis(val requestChannel: RequestChannel, } } + def 
handleControllerRegistration(request: RequestChannel.Request): CompletableFuture[Unit] = { + val registrationRequest = request.body[ControllerRegistrationRequest] + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) + val context = new ControllerRequestContext(request.context.header.data, request.context.principal, + OptionalLong.empty()) + + controller.registerController(context, registrationRequest.data) + .thenApply[Unit] { _ => + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + new ControllerRegistrationResponse(new ControllerRegistrationResponseData(). + setThrottleTimeMs(requestThrottleMs))) + } + } + def handleAlterPartitionReassignments(request: RequestChannel.Request): CompletableFuture[Unit] = { val alterRequest = request.body[AlterPartitionReassignmentsRequest] authHelper.authorizeClusterOperation(request, ALTER) @@ -1005,4 +1024,20 @@ class ControllerApis(val requestChannel: RequestChannel, } } } + + def handleDescribeCluster(request: RequestChannel.Request): CompletableFuture[Unit] = { + // Unlike on the broker, DESCRIBE_CLUSTER on the controller requires a high level of + // permissions (ALTER on CLUSTER). + authHelper.authorizeClusterOperation(request, ALTER) + val response = authHelper.computeDescribeClusterResponse( + request, + EndpointType.CONTROLLER, + metaProperties.clusterId, + () => registrationsPublisher.describeClusterControllers(request.context.listenerName()), + () => raftManager.leaderAndEpoch.leaderId().orElse(-1) + ) + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + new DescribeClusterResponse(response.setThrottleTimeMs(requestThrottleMs))) + CompletableFuture.completedFuture[Unit](()) + } } diff --git a/core/src/main/scala/kafka/server/ControllerRegistrationManager.scala b/core/src/main/scala/kafka/server/ControllerRegistrationManager.scala new file mode 100644 index 0000000000000..b2284efa9c6f9 --- /dev/null +++ b/core/src/main/scala/kafka/server/ControllerRegistrationManager.scala @@ -0,0 +1,298 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package kafka.server + +import java.util +import java.util.concurrent.TimeUnit.MILLISECONDS +import kafka.utils.Logging +import org.apache.kafka.clients.ClientResponse +import org.apache.kafka.common.Uuid +import org.apache.kafka.common.message.ControllerRegistrationRequestData.ListenerCollection +import org.apache.kafka.common.message.ControllerRegistrationRequestData +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.{ControllerRegistrationRequest, ControllerRegistrationResponse} +import org.apache.kafka.metadata.VersionRange +import org.apache.kafka.common.utils.{ExponentialBackoff, LogContext, Time} +import org.apache.kafka.image.loader.LoaderManifest +import org.apache.kafka.image.{MetadataDelta, MetadataImage} +import org.apache.kafka.image.publisher.MetadataPublisher +import org.apache.kafka.queue.EventQueue.DeadlineFunction +import org.apache.kafka.queue.{EventQueue, KafkaEventQueue} +import org.apache.kafka.server.common.MetadataVersion + +import scala.jdk.CollectionConverters._ + +/** + * The controller registration manager handles registering this controller with the controller + * quorum. This support was added by KIP-919, and requires a metadata version of 3.7 or higher. + * + * This code uses an event queue paradigm. Modifications get translated into events, which + * are placed on the queue to be processed sequentially. As described in the JavaDoc for + * each variable, most mutable state can be accessed only from that event queue thread. + */ +class ControllerRegistrationManager( + val config: KafkaConfig, + val clusterId: String, + val time: Time, + val threadNamePrefix: String, + val supportedFeatures: util.Map[String, VersionRange], + val incarnationId: Uuid, + val listenerPortOverrides: Map[String, Int] = Map(), + val resendExponentialBackoff: ExponentialBackoff = new ExponentialBackoff(100, 2, 120000L, 0.02) +) extends Logging with MetadataPublisher { + override def name(): String = "ControllerRegistrationManager" + + val nodeId: Int = config.nodeId + + private def logPrefix(): String = { + val builder = new StringBuilder("[ControllerRegistrationManager") + builder.append(" id=").append(config.nodeId) + builder.append(" incarnation=").append(incarnationId) + builder.append("] ") + builder.toString() + } + + val logContext = new LogContext(logPrefix()) + + this.logIdent = logContext.logPrefix() + + val listenerCollection = { + val collection = new ListenerCollection() + config.controllerListeners.foreach(endPoint => { + collection.add(new ControllerRegistrationRequestData.Listener(). + setHost(endPoint.host). + setName(endPoint.listenerName.value()). + setPort(listenerPortOverrides.getOrElse(endPoint.listenerName.value(), endPoint.port)). + setSecurityProtocol(endPoint.securityProtocol.id)) + }) + collection + } + + /** + * True if there is a pending RPC. Only read or written from the event queue thread. + */ + var pendingRpc = false + + /** + * The number of RPCs that we successfully sent. + * Only read or written from the event queue thread. + */ + var successfulRpcs = 0L + + /** + * The number of RPCs that we failed to send, or got back a failure response for. This is + * cleared after a success. Only read or written from the event queue thread. + */ + var failedRpcs = 0L + + /** + * The current metadata version that is in effect. Only read or written from the event queue thread. + */ + private var metadataVersion: MetadataVersion = MetadataVersion.MINIMUM_KRAFT_VERSION + + /** + * True if we're registered. 
Only read or written from the event queue thread. + */ + var registeredInLog: Boolean = false + + /** + * The channel manager, or null if this manager has not been started yet. This variable + * can only be read or written from the event queue thread. + */ + private var _channelManager: BrokerToControllerChannelManager = _ + + /** + * The event queue. + */ + private[server] val eventQueue = new KafkaEventQueue(time, + logContext, + threadNamePrefix + "registration-manager-", + new ShutdownEvent()) + + private class ShutdownEvent extends EventQueue.Event { + override def run(): Unit = { + try { + info(s"shutting down.") + if (_channelManager != null) { + _channelManager.shutdown() + _channelManager = null + } + } catch { + case t: Throwable => error("ControllerRegistrationManager.stop error", t) + } + } + } + + /** + * Start the ControllerRegistrationManager. + * + * @param channelManager The channel manager to use. + */ + def start(channelManager: BrokerToControllerChannelManager): Unit = { + eventQueue.append(() => { + try { + info(s"initialized channel manager.") + _channelManager = channelManager + maybeSendControllerRegistration() + } catch { + case t: Throwable => error("start error", t) + } + }) + } + + /** + * Start shutting down the ControllerRegistrationManager, but do not block. + */ + def beginShutdown(): Unit = { + eventQueue.beginShutdown("beginShutdown"); + } + + /** + * Shut down the ControllerRegistrationManager and block until all threads are joined. + */ + override def close(): Unit = { + beginShutdown() + eventQueue.close() + } + + override def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + manifest: LoaderManifest + ): Unit = { + if (delta.featuresDelta() != null || + (delta.clusterDelta() != null && delta.clusterDelta().changedControllers().containsKey(nodeId))) { + eventQueue.append(new MetadataUpdateEvent(delta, newImage)) + } + } + + private class MetadataUpdateEvent( + delta: MetadataDelta, + newImage: MetadataImage + ) extends EventQueue.Event { + override def run(): Unit = { + try { + if (delta.featuresDelta() != null) { + metadataVersion = newImage.features().metadataVersion() + } + if (delta.clusterDelta() != null) { + if (delta.clusterDelta().changedControllers().containsKey(nodeId)) { + val curRegistration = newImage.cluster().controllers().get(nodeId) + if (curRegistration == null) { + info(s"Registration removed for this node ID.") + registeredInLog = false + } else if (!curRegistration.incarnationId().equals(incarnationId)) { + info(s"Found registration for ${curRegistration.incarnationId()} instead of our incarnation.") + registeredInLog = false + } else { + info(s"Our registration has been persisted to the metadata log.") + registeredInLog = true + } + } + } + maybeSendControllerRegistration() + } catch { + case t: Throwable => error("onMetadataUpdate error", t) + } + } + } + + private def maybeSendControllerRegistration(): Unit = { + if (registeredInLog) { + debug("maybeSendControllerRegistration: controller is already registered.") + } else if (_channelManager == null) { + debug("maybeSendControllerRegistration: cannot register yet because the channel manager has " + + "not been initialized.") + } else if (!metadataVersion.isControllerRegistrationSupported) { + info("maybeSendControllerRegistration: cannot register yet because the metadata version is " + + s"still $metadataVersion, which does not support KIP-919 controller registration.") + } else if (pendingRpc) { + info("maybeSendControllerRegistration: waiting for the 
previous RPC to complete."); + } else { + sendControllerRegistration() + } + } + + private def sendControllerRegistration(): Unit = { + val features = new ControllerRegistrationRequestData.FeatureCollection() + supportedFeatures.asScala.foreach { + case (name, range) => features.add(new ControllerRegistrationRequestData.Feature(). + setName(name). + setMinSupportedVersion(range.min()). + setMaxSupportedVersion(range.max())) + } + val data = new ControllerRegistrationRequestData(). + setControllerId(nodeId). + setFeatures(features). + setIncarnationId(incarnationId). + setListeners(listenerCollection) + info(s"sendControllerRegistration: attempting to send $data") + _channelManager.sendRequest(new ControllerRegistrationRequest.Builder(data), + new RegistrationResponseHandler()) + pendingRpc = true + } + + private class RegistrationResponseHandler extends ControllerRequestCompletionHandler { + override def onComplete(response: ClientResponse): Unit = { + pendingRpc = false + if (response.authenticationException() != null) { + error(s"RegistrationResponseHandler: authentication error", response.authenticationException()) + scheduleNextCommunicationAfterFailure() + } else if (response.versionMismatch() != null) { + error(s"RegistrationResponseHandler: unsupported API version error", response.versionMismatch()) + scheduleNextCommunicationAfterFailure() + } else if (response.responseBody() == null) { + error(s"RegistrationResponseHandler: unknown error") + scheduleNextCommunicationAfterFailure() + } else if (!response.responseBody().isInstanceOf[ControllerRegistrationResponse]) { + error(s"RegistrationResponseHandler: invalid response type error") + scheduleNextCommunicationAfterFailure() + } else { + val message = response.responseBody().asInstanceOf[ControllerRegistrationResponse] + val errorCode = Errors.forCode(message.data().errorCode()) + if (errorCode == Errors.NONE) { + successfulRpcs = successfulRpcs + 1 + failedRpcs = 0 + info(s"RegistrationResponseHandler: controller acknowledged ControllerRegistrationRequest.") + } else { + info(s"RegistrationResponseHandler: controller returned error $errorCode " + + s"(${message.data().errorMessage()})") + scheduleNextCommunicationAfterFailure() + } + } + } + + override def onTimeout(): Unit = { + error(s"RegistrationResponseHandler: channel manager timed out before sending the request.") + scheduleNextCommunicationAfterFailure() + } + } + + private def scheduleNextCommunicationAfterFailure(): Unit = { + val delayMs = resendExponentialBackoff.backoff(failedRpcs) + failedRpcs = failedRpcs + 1 + scheduleNextCommunication(delayMs) + } + + private def scheduleNextCommunication(intervalMs: Long): Unit = { + trace(s"Scheduling next communication at ${intervalMs} ms from now.") + val deadlineNs = time.nanoseconds() + MILLISECONDS.toNanos(intervalMs) + eventQueue.scheduleDeferred("communication", + new DeadlineFunction(deadlineNs), + () => maybeSendControllerRegistration()) + } +} diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala b/core/src/main/scala/kafka/server/ControllerServer.scala index 9a626ab84b561..36d41cf822ae0 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -35,10 +35,10 @@ import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.LogContext 
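On the resend behaviour of the ControllerRegistrationManager above (a Scala class), a compact Java sketch of the backoff bookkeeping may help. The class and method names are illustrative; ExponentialBackoff and its backoff(attempts) call are the ones used in the patch.

    import org.apache.kafka.common.utils.ExponentialBackoff;

    // Illustrative sketch: the delay grows with the number of consecutive failed registration
    // RPCs, and the counter is reset to zero once the controller acknowledges a registration.
    final class RegistrationBackoffSketch {
        private final ExponentialBackoff backoff = new ExponentialBackoff(100, 2, 120_000L, 0.02);
        private long failedRpcs = 0;

        long nextDelayMsAfterFailure() {
            long delayMs = backoff.backoff(failedRpcs);
            failedRpcs++;
            return delayMs;
        }

        void onSuccessfulRegistration() {
            failedRpcs = 0;
        }
    }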
-import org.apache.kafka.common.{ClusterResource, Endpoint} +import org.apache.kafka.common.{ClusterResource, Endpoint, Uuid} import org.apache.kafka.controller.metrics.{ControllerMetadataMetricsPublisher, QuorumControllerMetrics} import org.apache.kafka.controller.{Controller, QuorumController, QuorumFeatures} -import org.apache.kafka.image.publisher.MetadataPublisher +import org.apache.kafka.image.publisher.{ControllerRegistrationsPublisher, MetadataPublisher} import org.apache.kafka.metadata.KafkaConfigSchema import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer import org.apache.kafka.metadata.bootstrap.BootstrapMetadata @@ -92,6 +92,7 @@ class ControllerServer( private val metricsGroup = new KafkaMetricsGroup(this.getClass) val config = sharedServer.controllerConfig + val logContext = new LogContext(s"[ControllerServer id=${config.nodeId}] ") val time = sharedServer.time def metrics = sharedServer.metrics def raftManager: KafkaRaftManager[ApiMessageAndVersion] = sharedServer.raftManager @@ -117,7 +118,11 @@ class ControllerServer( var migrationSupport: Option[ControllerMigrationSupport] = None def kafkaYammerMetrics: KafkaYammerMetrics = KafkaYammerMetrics.INSTANCE val metadataPublishers: util.List[MetadataPublisher] = new util.ArrayList[MetadataPublisher]() - val featuresPublisher = new FeaturesPublisher() + @volatile var featuresPublisher: FeaturesPublisher = _ + @volatile var registrationsPublisher: ControllerRegistrationsPublisher = _ + @volatile var incarnationId: Uuid = _ + @volatile var registrationManager: ControllerRegistrationManager = _ + @volatile var registrationChannelManager: BrokerToControllerChannelManager = _ private def maybeChangeStatus(from: ProcessStatus, to: ProcessStatus): Boolean = { lock.lock() @@ -137,7 +142,7 @@ class ControllerServer( if (!maybeChangeStatus(SHUTDOWN, STARTING)) return val startupDeadline = Deadline.fromDelay(time, config.serverMaxStartupTimeMs, TimeUnit.MILLISECONDS) try { - this.logIdent = new LogContext(s"[ControllerServer id=${config.nodeId}] ").logPrefix() + this.logIdent = logContext.logPrefix() info("Starting controller") config.dynamicConfig.initialize(zkClientOpt = None) @@ -167,6 +172,12 @@ class ControllerServer( config.earlyStartListeners.map(_.value()).asJava)) } + featuresPublisher = new FeaturesPublisher(logContext) + + registrationsPublisher = new ControllerRegistrationsPublisher() + + incarnationId = Uuid.randomUuid() + val apiVersionManager = new SimpleApiVersionManager( ListenerType.CONTROLLER, config.unstableApiVersionsEnabled, @@ -201,10 +212,9 @@ class ControllerServer( sharedServer.controllerQuorumVotersFuture, startupDeadline, time) val controllerNodes = RaftConfig.voterConnectionsToNodes(voterConnections) - val quorumFeatures = QuorumFeatures.create(config.nodeId, - sharedServer.raftManager.apiVersions, + val quorumFeatures = new QuorumFeatures(config.nodeId, QuorumFeatures.defaultFeatureMap(), - controllerNodes) + controllerNodes.asScala.map(node => Integer.valueOf(node.id())).asJava) val delegationTokenKeyString = { if (config.tokenAuthEnabled) { @@ -306,7 +316,7 @@ class ControllerServer( raftManager, config, sharedServer.metaProps, - controllerNodes.asScala.toSeq, + registrationsPublisher, apiVersionManager) controllerApisHandlerPool = new KafkaRequestHandlerPool(config.nodeId, socketServer.dataPlaneRequestChannel, @@ -319,6 +329,27 @@ class ControllerServer( // Set up the metadata features publisher. metadataPublishers.add(featuresPublisher) + // Set up the controller registrations publisher. 
+ metadataPublishers.add(registrationsPublisher) + + // Create the registration manager, which handles sending KIP-919 controller registrations. + registrationManager = new ControllerRegistrationManager(config, + clusterId, + time, + s"controller-${config.nodeId}-", + QuorumFeatures.defaultFeatureMap(), + incarnationId, + // We special-case the first controller listener, using the port value obtained from + // SocketServer directly. This is to handle the case where we are using an ephemeral port + // (aka binding to port 0) in unit tests. In this case, we need to register with the true + // port number which we obtained after binding, not with a literal 0. + Map[String, Int](config.controllerListeners.head.listenerName.value() -> + socketServerFirstBoundPortFuture.get())) + + // Add the registration manager to the list of metadata publishers, so that it receives + // callbacks when the cluster registrations change. + metadataPublishers.add(registrationManager) + // Set up the dynamic config publisher. This runs even in combined mode, since the broker // has its own separate dynamic configuration object. metadataPublishers.add(new DynamicConfigPublisher( @@ -388,6 +419,21 @@ class ControllerServer( */ val socketServerFuture = socketServer.enableRequestProcessing(authorizerFutures) + /** + * Start the KIP-919 controller registration manager. + */ + val controllerNodeProvider = RaftControllerNodeProvider(raftManager, config, controllerNodes.asScala) + registrationChannelManager = BrokerToControllerChannelManager( + controllerNodeProvider, + time, + metrics, + config, + "registration", + s"controller-${config.nodeId}-", + 5000) + registrationChannelManager.start() + registrationManager.start(registrationChannelManager) + // Block here until all the authorizer futures are complete FutureUtils.waitWithLogging(logger.underlying, logIdent, "all of the authorizer futures to be completed", @@ -416,6 +462,23 @@ class ControllerServer( // Ensure that we're not the Raft leader prior to shutting down our socket server, for a // smoother transition. 
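The comment about ephemeral ports in the registration-manager setup above refers to a standard testing trick: configure port 0, let the operating system assign a free port at bind time, then look up the port that was actually bound and register that instead of the literal 0. A minimal, self-contained Java illustration of the pattern (not code from this patch):

import java.io.IOException;
import java.net.ServerSocket;

public final class EphemeralPortExample {
    public static void main(String[] args) throws IOException {
        // Binding to port 0 asks the OS for any free port; a test then registers the
        // port that was actually assigned rather than the 0 it configured.
        try (ServerSocket socket = new ServerSocket(0)) {
            int boundPort = socket.getLocalPort();
            System.out.println("Configured port 0, actually bound to " + boundPort);
        }
    }
}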
sharedServer.ensureNotRaftLeader() + if (featuresPublisher != null) { + featuresPublisher.close() + featuresPublisher = null + } + if (registrationsPublisher != null) { + registrationsPublisher.close() + registrationsPublisher = null + } + incarnationId = null + if (registrationManager != null) { + CoreUtils.swallow(registrationManager.close(), this) + registrationManager = null + } + if (registrationChannelManager != null) { + CoreUtils.swallow(registrationChannelManager.shutdown(), this) + registrationChannelManager = null + } metadataPublishers.forEach(p => sharedServer.loader.removeAndClosePublisher(p).get()) metadataPublishers.clear() if (socketServer != null) diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index ab4bbe96237dc..b588f83e869a3 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -27,7 +27,7 @@ import kafka.utils.Implicits._ import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.admin.AdminUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType -import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} +import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry, EndpointType} import org.apache.kafka.common.acl.AclOperation._ import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.config.ConfigResource @@ -242,6 +242,7 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.ALLOCATE_PRODUCER_IDS => handleAllocateProducerIdsRequest(request) case ApiKeys.DESCRIBE_QUORUM => forwardToControllerOrFail(request) case ApiKeys.CONSUMER_GROUP_HEARTBEAT => handleConsumerGroupHeartbeat(request).exceptionally(handleError) + case ApiKeys.CONSUMER_GROUP_DESCRIBE => handleConsumerGroupDescribe(request).exceptionally(handleError) case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}") } } catch { @@ -3509,43 +3510,34 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleDescribeCluster(request: RequestChannel.Request): Unit = { - val describeClusterRequest = request.body[DescribeClusterRequest] - - var clusterAuthorizedOperations = Int.MinValue // Default value in the schema - // get cluster authorized operations - if (describeClusterRequest.data.includeClusterAuthorizedOperations) { - if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME)) - clusterAuthorizedOperations = authHelper.authorizedOperations(request, Resource.CLUSTER) - else - clusterAuthorizedOperations = 0 - } - - val brokers = metadataCache.getAliveBrokerNodes(request.context.listenerName) - val controllerId = { - metadataCache.getControllerId.flatMap { - case ZkCachedControllerId(id) => Some(id) - case KRaftCachedControllerId(_) => metadataCache.getRandomAliveBrokerId - } - } - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { - val data = new DescribeClusterResponseData() - .setThrottleTimeMs(requestThrottleMs) - .setClusterId(clusterId) - .setControllerId(controllerId.getOrElse(MetadataResponse.NO_CONTROLLER_ID)) - .setClusterAuthorizedOperations(clusterAuthorizedOperations) - - - brokers.foreach { broker => - data.brokers.add(new DescribeClusterResponseData.DescribeClusterBroker() - .setBrokerId(broker.id) - .setHost(broker.host) - .setPort(broker.port) - .setRack(broker.rack)) + val response = authHelper.computeDescribeClusterResponse( + request, + EndpointType.BROKER, + clusterId, + () => { + val brokers = new 
DescribeClusterResponseData.DescribeClusterBrokerCollection() + metadataCache.getAliveBrokerNodes(request.context.listenerName).foreach { node => + brokers.add(new DescribeClusterResponseData.DescribeClusterBroker(). + setBrokerId(node.id). + setHost(node.host). + setPort(node.port). + setRack(node.rack)) + } + brokers + }, + () => { + metadataCache.getControllerId match { + case Some(value) => + value match { + case ZkCachedControllerId (id) => id + case KRaftCachedControllerId (_) => metadataCache.getRandomAliveBrokerId.getOrElse(- 1) + } + case None => -1 + } } - - new DescribeClusterResponse(data) - }) + ) + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + new DescribeClusterResponse(response.setThrottleTimeMs(requestThrottleMs))) } def handleEnvelope(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { @@ -3721,6 +3713,11 @@ class KafkaApis(val requestChannel: RequestChannel, } } + def handleConsumerGroupDescribe(request: RequestChannel.Request): CompletableFuture[Unit] = { + requestHelper.sendMaybeThrottle(request, request.body[ConsumerGroupDescribeRequest].getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + CompletableFuture.completedFuture[Unit](()) + } + private def updateRecordConversionStats(request: RequestChannel.Request, tp: TopicPartition, conversionStats: RecordConversionStats): Unit = { diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index 948bd30d74750..f92e3717361a3 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -1263,6 +1263,7 @@ class ReplicaManager(val config: KafkaConfig, } catch { case e: RejectedExecutionException => // Return the error if any in scheduling the remote fetch task + warn("Unable to fetch data from remote storage", e) return Some(createLogReadResult(e)) } diff --git a/core/src/main/scala/kafka/server/SharedServer.scala b/core/src/main/scala/kafka/server/SharedServer.scala index 892c528885a44..dc627f9eaaac7 100644 --- a/core/src/main/scala/kafka/server/SharedServer.scala +++ b/core/src/main/scala/kafka/server/SharedServer.scala @@ -309,6 +309,7 @@ class SharedServer( case e: Throwable => { error("Got exception while starting SharedServer", e) stop() + throw e } } } diff --git a/core/src/main/scala/kafka/tools/GetOffsetShell.scala b/core/src/main/scala/kafka/tools/GetOffsetShell.scala deleted file mode 100644 index 1acbee6976d56..0000000000000 --- a/core/src/main/scala/kafka/tools/GetOffsetShell.scala +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package kafka.tools - -import joptsimple._ -import kafka.utils.{Exit, ToolsUtils} -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, ListTopicsOptions, OffsetSpec} -import org.apache.kafka.common.{KafkaException, TopicPartition} -import org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse} -import org.apache.kafka.common.utils.Utils -import org.apache.kafka.server.util.CommandLineUtils -import org.apache.kafka.server.util.TopicFilter.IncludeList -import org.apache.kafka.server.util.TopicPartitionFilter -import org.apache.kafka.server.util.TopicPartitionFilter.TopicFilterAndPartitionFilter -import org.apache.kafka.server.util.TopicPartitionFilter.CompositeTopicPartitionFilter -import org.apache.kafka.server.util.PartitionFilter.UniquePartitionFilter -import org.apache.kafka.server.util.PartitionFilter.PartitionRangeFilter -import org.apache.kafka.server.util.PartitionFilter.PartitionsSetFilter - -import java.util.Properties -import java.util.concurrent.ExecutionException -import java.util.regex.Pattern -import scala.collection.Seq -import scala.jdk.CollectionConverters._ -import scala.math.Ordering.Implicits.infixOrderingOps - -object GetOffsetShell { - private val TopicPartitionPattern = Pattern.compile("([^:,]*)(?::(?:([0-9]*)|(?:([0-9]*)-([0-9]*))))?") - - def main(args: Array[String]): Unit = { - try { - fetchOffsets(args) - } catch { - case e: Exception => - println(s"Error occurred: ${e.getMessage}") - Exit.exit(1, Some(e.getMessage)) - } - } - - private[tools] def fetchOffsets(args: Array[String]): Unit = { - val parser = new OptionParser(false) - val brokerListOpt = parser.accepts("broker-list", "DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap-server is specified. The server(s) to connect to in the form HOST1:PORT1,HOST2:PORT2.") - .withRequiredArg - .describedAs("HOST1:PORT1,...,HOST3:PORT3") - .ofType(classOf[String]) - val bootstrapServerOpt = parser.accepts("bootstrap-server", "REQUIRED. The server(s) to connect to in the form HOST1:PORT1,HOST2:PORT2.") - .requiredUnless("broker-list") - .withRequiredArg - .describedAs("HOST1:PORT1,...,HOST3:PORT3") - .ofType(classOf[String]) - val topicPartitionsOpt = parser.accepts("topic-partitions", s"Comma separated list of topic-partition patterns to get the offsets for, with the format of '$TopicPartitionPattern'." + - " The first group is an optional regex for the topic name, if omitted, it matches any topic name." + - " The section after ':' describes a 'partition' pattern, which can be: a number, a range in the format of 'NUMBER-NUMBER' (lower inclusive, upper exclusive), an inclusive lower bound in the format of 'NUMBER-', an exclusive upper bound in the format of '-NUMBER' or may be omitted to accept all partitions.") - .withRequiredArg - .describedAs("topic1:1,topic2:0-3,topic3,topic4:5-,topic5:-3") - .ofType(classOf[String]) - val topicOpt = parser.accepts("topic", s"The topic to get the offsets for. It also accepts a regular expression. If not present, all authorized topics are queried. Cannot be used if --topic-partitions is present.") - .withRequiredArg - .describedAs("topic") - .ofType(classOf[String]) - val partitionsOpt = parser.accepts("partitions", s"Comma separated list of partition ids to get the offsets for. If not present, all partitions of the authorized topics are queried. 
Cannot be used if --topic-partitions is present.") - .withRequiredArg - .describedAs("partition ids") - .ofType(classOf[String]) - val timeOpt = parser.accepts("time", "timestamp of the offsets before that. [Note: No offset is returned, if the timestamp greater than recently committed record timestamp is given.]") - .withRequiredArg - .describedAs(" / -1 or latest / -2 or earliest / -3 or max-timestamp") - .ofType(classOf[String]) - .defaultsTo("latest") - val commandConfigOpt = parser.accepts("command-config", s"Property file containing configs to be passed to Admin Client.") - .withRequiredArg - .describedAs("config file") - .ofType(classOf[String]) - val excludeInternalTopicsOpt = parser.accepts("exclude-internal-topics", s"By default, internal topics are included. If specified, internal topics are excluded.") - - if (args.isEmpty) - CommandLineUtils.printUsageAndExit(parser, "An interactive shell for getting topic-partition offsets.") - - val options = parser.parse(args : _*) - - val effectiveBrokerListOpt = if (options.has(bootstrapServerOpt)) - bootstrapServerOpt - else - brokerListOpt - - CommandLineUtils.checkRequiredArgs(parser, options, effectiveBrokerListOpt) - - val clientId = "GetOffsetShell" - val brokerList = options.valueOf(effectiveBrokerListOpt) - - ToolsUtils.validatePortOrDie(parser, brokerList) - val excludeInternalTopics = options.has(excludeInternalTopicsOpt) - - if (options.has(topicPartitionsOpt) && (options.has(topicOpt) || options.has(partitionsOpt))) { - throw new IllegalArgumentException("--topic-partitions cannot be used with --topic or --partitions") - } - - val offsetSpec = parseOffsetSpec(options.valueOf(timeOpt)) - - val topicPartitionFilter = if (options.has(topicPartitionsOpt)) { - createTopicPartitionFilterWithPatternList(options.valueOf(topicPartitionsOpt)) - } else { - createTopicPartitionFilterWithTopicAndPartitionPattern( - if (options.has(topicOpt)) Some(options.valueOf(topicOpt)) else None, - options.valueOf(partitionsOpt) - ) - } - - val config = if (options.has(commandConfigOpt)) - Utils.loadProps(options.valueOf(commandConfigOpt)) - else - new Properties - config.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) - config.setProperty(AdminClientConfig.CLIENT_ID_CONFIG, clientId) - val adminClient = Admin.create(config) - - try { - val partitionInfos = listPartitionInfos(adminClient, topicPartitionFilter, excludeInternalTopics) - - if (partitionInfos.isEmpty) { - throw new IllegalArgumentException("Could not match any topic-partitions with the specified filters") - } - - val timestampsToSearch = partitionInfos.map(tp => tp -> offsetSpec).toMap.asJava - - val listOffsetsResult = adminClient.listOffsets(timestampsToSearch) - val partitionOffsets = partitionInfos.flatMap { tp => - try { - val partitionInfo = listOffsetsResult.partitionResult(tp).get - if (partitionInfo.offset != ListOffsetsResponse.UNKNOWN_OFFSET) { - Some((tp, partitionInfo.offset)) - } else { - None - } - } catch { - case e: ExecutionException => - e.getCause match { - case cause: KafkaException => - System.err.println(s"Skip getting offsets for topic-partition ${tp.topic}:${tp.partition} due to error: ${cause.getMessage}") - case _ => - throw e - } - None - } - } - - partitionOffsets.sortWith((tp1, tp2) => compareTopicPartitions(tp1._1, tp2._1)).foreach { - case (tp, offset) => println(s"${tp.topic}:${tp.partition}:${Option(offset).getOrElse("")}") - } - } finally { - adminClient.close() - } - } - - private def parseOffsetSpec(listOffsetsTimestamp: String): 
OffsetSpec = { - listOffsetsTimestamp match { - case "earliest" => OffsetSpec.earliest() - case "latest" => OffsetSpec.latest() - case "max-timestamp" => OffsetSpec.maxTimestamp() - case _ => - try { - listOffsetsTimestamp.toLong match { - case ListOffsetsRequest.EARLIEST_TIMESTAMP => OffsetSpec.earliest() - case ListOffsetsRequest.LATEST_TIMESTAMP => OffsetSpec.latest() - case ListOffsetsRequest.MAX_TIMESTAMP => OffsetSpec.maxTimestamp() - case value => OffsetSpec.forTimestamp(value) - } - } catch { - case e: NumberFormatException => - throw new IllegalArgumentException(s"Malformed time argument $listOffsetsTimestamp, please use -1 or latest / -2 or earliest / -3 or max-timestamp, or a specified long format timestamp", e) - } - } - } - - def compareTopicPartitions(a: TopicPartition, b: TopicPartition): Boolean = { - (a.topic(), a.partition()) < (b.topic(), b.partition()) - } - - /** - * Creates a topic-partition filter based on a list of patterns. - * Expected format: - * List: TopicPartitionPattern(, TopicPartitionPattern)* - * TopicPartitionPattern: TopicPattern(:PartitionPattern)? | :PartitionPattern - * TopicPattern: REGEX - * PartitionPattern: NUMBER | NUMBER-(NUMBER)? | -NUMBER - */ - def createTopicPartitionFilterWithPatternList( - topicPartitions: String - ): TopicPartitionFilter = { - val ruleSpecs = topicPartitions.split(",") - val rules = ruleSpecs.toSeq.map(ruleSpec => parseRuleSpec(ruleSpec)) - new CompositeTopicPartitionFilter(rules.asJava) - } - - def parseRuleSpec(ruleSpec: String): TopicPartitionFilter = { - val matcher = TopicPartitionPattern.matcher(ruleSpec) - if (!matcher.matches()) - throw new IllegalArgumentException(s"Invalid rule specification: $ruleSpec") - - def group(group: Int): Option[String] = { - Option(matcher.group(group)).filter(s => s != null && s.nonEmpty) - } - - val topicFilter = new IncludeList(group(1).getOrElse(".*")) - val partitionFilter = group(2).map(_.toInt) match { - case Some(partition) => - new UniquePartitionFilter(partition) - case None => - val lowerRange = group(3).map(_.toInt).getOrElse(0) - val upperRange = group(4).map(_.toInt).getOrElse(Int.MaxValue) - new PartitionRangeFilter(lowerRange, upperRange) - } - new TopicFilterAndPartitionFilter( - topicFilter, - partitionFilter - ) - } - - /** - * Creates a topic-partition filter based on a topic pattern and a set of partition ids. - */ - def createTopicPartitionFilterWithTopicAndPartitionPattern( - topicOpt: Option[String], - partitionIds: String - ): TopicFilterAndPartitionFilter = { - new TopicFilterAndPartitionFilter( - new IncludeList(topicOpt.getOrElse(".*")), - new PartitionsSetFilter(createPartitionSet(partitionIds).asJava) - ) - } - - def createPartitionSet(partitionsString: String): Set[Integer] = { - if (partitionsString == null || partitionsString.isEmpty) - Set.empty - else - partitionsString.split(",").map { partitionString => - try Integer.valueOf(partitionString) - catch { - case _: NumberFormatException => - throw new IllegalArgumentException(s"--partitions expects a comma separated list of numeric " + - s"partition ids, but received: $partitionsString") - } - }.toSet - } - - /** - * Return the partition infos. Filter them with topicPartitionFilter. 
- */ - private def listPartitionInfos( - client: Admin, - topicPartitionFilter: TopicPartitionFilter, - excludeInternalTopics: Boolean - ): Seq[TopicPartition] = { - val listTopicsOptions = new ListTopicsOptions().listInternal(!excludeInternalTopics) - val topics = client.listTopics(listTopicsOptions).names.get - val filteredTopics = topics.asScala.filter(topicPartitionFilter.isTopicAllowed) - - client.describeTopics(filteredTopics.asJava).allTopicNames.get.asScala.flatMap { case (topic, description) => - description - .partitions - .asScala - .map(tp => new TopicPartition(topic, tp.partition)) - .filter(topicPartitionFilter.isTopicPartitionAllowed) - }.toBuffer - } -} - diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java index 682b58b97256d..92a6c63537cd4 100644 --- a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -93,6 +93,7 @@ import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -198,6 +199,10 @@ public RemoteStorageManager createRemoteStorageManager() { public RemoteLogMetadataManager createRemoteLogMetadataManager() { return remoteLogMetadataManager; } + @Override + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { + return 0L; + } }; } @@ -315,8 +320,6 @@ public RemoteLogMetadataManager createRemoteLogMetadataManager() { } } - - @Test void testStartup() { remoteLogManager.startup(); @@ -1399,6 +1402,112 @@ public void testStopPartitionsWithDeletion() throws RemoteStorageException { verify(remoteLogMetadataManager, times(16)).updateRemoteLogSegmentMetadata(any()); } + /** + * This test asserts that the newly elected leader for a partition is able to find the log-start-offset. + * Note that the case tested here is that the previous leader deleted the log segments up-to offset 500. And, the + * log-start-offset didn't propagate to the replicas before the leader-election. 
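In other words, a newly elected leader should take its log-start-offset from the earliest segment already present in remote storage, and only fall back to the local log-start-offset when remote storage holds nothing for the partition. A hedged sketch of that rule, using plain longs instead of the real metadata types; the class and method names are invented for the example:

import java.util.Collections;
import java.util.List;

final class LogStartOffsetSketch {
    // Earliest remote-segment start offset wins; fall back to the local log-start-offset
    // when nothing has been copied to remote storage for this partition yet.
    static long findLogStartOffset(List<Long> remoteSegmentStartOffsets, long localLogStartOffset) {
        return remoteSegmentStartOffsets.stream()
                .min(Long::compare)
                .orElse(localLogStartOffset);
    }

    public static void main(String[] args) {
        // Mirrors the expectations in the tests below: 500 when remote segments start at 500 and 540,
        // 250 (the local log-start-offset) when remote storage is empty.
        System.out.println(findLogStartOffset(java.util.Arrays.asList(500L, 540L), 250L)); // 500
        System.out.println(findLogStartOffset(Collections.emptyList(), 250L));             // 250
    }
}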
+ */ + @Test + public void testFindLogStartOffset() throws RemoteStorageException, IOException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(0, 0L)); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); + when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + + long timestamp = time.milliseconds(); + int segmentSize = 1024; + List segmentMetadataList = Arrays.asList( + new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 500, 539, timestamp, brokerId, timestamp, segmentSize, truncateAndGetLeaderEpochs(epochEntries, 500L, 539L)), + new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 540, 700, timestamp, brokerId, timestamp, segmentSize, truncateAndGetLeaderEpochs(epochEntries, 540L, 700L)) + ); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(invocation -> { + int epoch = invocation.getArgument(1); + if (epoch == 1) + return segmentMetadataList.iterator(); + else + return Collections.emptyIterator(); + }); + try (RemoteLogManager remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + assertEquals(500L, remoteLogManager.findLogStartOffset(leaderTopicIdPartition, mockLog)); + } + } + + @Test + public void testFindLogStartOffsetFallbackToLocalLogStartOffsetWhenRemoteIsEmpty() throws RemoteStorageException, IOException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); + when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(mockLog.localLogStartOffset()).thenReturn(250L); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenReturn(Collections.emptyIterator()); + + try (RemoteLogManager remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + assertEquals(250L, remoteLogManager.findLogStartOffset(leaderTopicIdPartition, mockLog)); + } + } + + @Test + public void testLogStartOffsetUpdatedOnStartup() throws RemoteStorageException, IOException, InterruptedException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); + when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + + RemoteLogSegmentMetadata metadata = mock(RemoteLogSegmentMetadata.class); + when(metadata.startOffset()).thenReturn(600L); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(invocation -> { + int 
epoch = invocation.getArgument(1); + if (epoch == 2) + return Collections.singletonList(metadata).iterator(); + else + return Collections.emptyIterator(); + }); + + AtomicLong logStartOffset = new AtomicLong(0); + try (RemoteLogManager remoteLogManager = new RemoteLogManager(remoteLogManagerConfig, brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> logStartOffset.set(offset), + brokerTopicStats) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + RemoteLogManager.RLMTask task = remoteLogManager.new RLMTask(leaderTopicIdPartition, 128); + task.convertToLeader(4); + task.copyLogSegmentsToRemote(mockLog); + assertEquals(600L, logStartOffset.get()); + } + } + private List listRemoteLogSegmentMetadata(TopicIdPartition topicIdPartition, int segmentCount, int recordsPerSegment, diff --git a/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java b/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java index 9c9bfa2b48a93..7cd845da26ec7 100644 --- a/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java +++ b/core/src/test/java/kafka/test/ClusterTestExtensionsTest.java @@ -117,6 +117,6 @@ public void testNoAutoStart() { @ClusterTest public void testDefaults(ClusterConfig config) { - Assertions.assertEquals(MetadataVersion.IBP_3_6_IV2, config.metadataVersion()); + Assertions.assertEquals(MetadataVersion.IBP_3_7_IV0, config.metadataVersion()); } } diff --git a/core/src/test/java/kafka/test/MockController.java b/core/src/test/java/kafka/test/MockController.java index fdd742c9e966e..490c56d1a2725 100644 --- a/core/src/test/java/kafka/test/MockController.java +++ b/core/src/test/java/kafka/test/MockController.java @@ -34,6 +34,7 @@ import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.message.BrokerHeartbeatRequestData; import org.apache.kafka.common.message.BrokerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.message.CreateDelegationTokenRequestData; import org.apache.kafka.common.message.CreateDelegationTokenResponseData; import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic; @@ -510,6 +511,14 @@ synchronized public CompletableFuture> createP return CompletableFuture.completedFuture(results); } + @Override + public CompletableFuture registerController( + ControllerRequestContext context, + ControllerRegistrationRequestData request + ) { + throw new UnsupportedOperationException(); + } + @Override public void beginShutdown() { this.active = false; diff --git a/core/src/test/java/kafka/test/annotation/ClusterTest.java b/core/src/test/java/kafka/test/annotation/ClusterTest.java index d199d3418e326..886958533c237 100644 --- a/core/src/test/java/kafka/test/annotation/ClusterTest.java +++ b/core/src/test/java/kafka/test/annotation/ClusterTest.java @@ -41,6 +41,6 @@ String name() default ""; SecurityProtocol securityProtocol() default SecurityProtocol.PLAINTEXT; String listener() default ""; - MetadataVersion metadataVersion() default MetadataVersion.IBP_3_6_IV2; + MetadataVersion metadataVersion() default MetadataVersion.IBP_3_7_IV0; ClusterConfigProperty[] serverProperties() default {}; } diff --git a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java index e09ee49402f8c..1adcb70236771 100644 --- 
a/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java +++ b/core/src/test/java/kafka/testkit/KafkaClusterTestKit.java @@ -72,9 +72,12 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Collectors; +import static org.junit.jupiter.api.Assertions.assertNotNull; + @SuppressWarnings("deprecation") // Needed for Scala 2.12 compatibility public class KafkaClusterTestKit implements AutoCloseable { @@ -494,6 +497,19 @@ public Map controllers() { return controllers; } + public Controller waitForActiveController() throws InterruptedException { + AtomicReference active = new AtomicReference<>(null); + TestUtils.retryOnExceptionWithTimeout(60_000, () -> { + for (ControllerServer controllerServer : controllers.values()) { + if (controllerServer.controller().isActive()) { + active.set(controllerServer.controller()); + } + } + assertNotNull(active.get(), "No active controller found"); + }); + return active.get(); + } + public Map brokers() { return brokers; } diff --git a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala index 94e6118d7d476..32036ca4405f0 100644 --- a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala +++ b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala @@ -1164,6 +1164,30 @@ class KRaftClusterTest { cluster.close() } } + + @Test + def testRegisteredControllerEndpoints(): Unit = { + val cluster = new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setNumControllerNodes(3).build()). + build() + try { + cluster.format() + cluster.startup() + TestUtils.retry(60000) { + val controller = cluster.controllers().values().iterator().next() + val registeredControllers = controller.registrationsPublisher.controllers() + assertEquals(3, registeredControllers.size(), "Expected 3 controller registrations") + registeredControllers.values().forEach(registration => { + assertNotNull(registration.listeners.get("CONTROLLER")); + assertNotEquals(0, registration.listeners.get("CONTROLLER").port()); + }) + } + } finally { + cluster.close() + } + } } class BadAuthorizer() extends Authorizer { diff --git a/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala b/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala index e8e72400d4823..9e1026a5a171f 100644 --- a/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala @@ -18,8 +18,8 @@ package kafka.zk import kafka.security.authorizer.AclEntry.{WildcardHost, WildcardPrincipalString} import kafka.server.{ConfigType, KafkaConfig} -import kafka.test.ClusterInstance -import kafka.test.annotation.{AutoStart, ClusterConfigProperty, ClusterTest, Type} +import kafka.test.{ClusterConfig, ClusterGenerator, ClusterInstance} +import kafka.test.annotation.{AutoStart, ClusterConfigProperty, ClusterTemplate, ClusterTest, Type} import kafka.test.junit.ClusterTestExtensions import kafka.test.junit.ZkClusterInvocationContext.ZkClusterInstance import kafka.testkit.{KafkaClusterTestKit, TestKitNodes} @@ -55,6 +55,26 @@ import java.util.{Properties, UUID} import scala.collection.Seq import scala.jdk.CollectionConverters._ +object ZkMigrationIntegrationTest { + def addZkBrokerProps(props: Properties): Unit = 
{ + props.setProperty("inter.broker.listener.name", "EXTERNAL") + props.setProperty("listeners", "PLAINTEXT://localhost:0,EXTERNAL://localhost:0") + props.setProperty("advertised.listeners", "PLAINTEXT://localhost:0,EXTERNAL://localhost:0") + props.setProperty("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT") + } + + def zkClustersForAllMigrationVersions(clusterGenerator: ClusterGenerator): Unit = { + Seq(MetadataVersion.IBP_3_4_IV0, MetadataVersion.IBP_3_5_IV2, MetadataVersion.IBP_3_6_IV2).foreach { mv => + val clusterConfig = ClusterConfig.defaultClusterBuilder() + .metadataVersion(mv) + .brokers(3) + .`type`(Type.ZK) + .build() + addZkBrokerProps(clusterConfig.serverProperties()) + clusterGenerator.accept(clusterConfig) + } + } +} @ExtendWith(value = Array(classOf[ClusterTestExtensions])) @Timeout(300) @@ -308,12 +328,7 @@ class ZkMigrationIntegrationTest { } // SCRAM and Quota are intermixed. Test Quota Only here - @ClusterTest(clusterType = Type.ZK, brokers = 3, metadataVersion = MetadataVersion.IBP_3_4_IV0, serverProperties = Array( - new ClusterConfigProperty(key = "inter.broker.listener.name", value = "EXTERNAL"), - new ClusterConfigProperty(key = "listeners", value = "PLAINTEXT://localhost:0,EXTERNAL://localhost:0"), - new ClusterConfigProperty(key = "advertised.listeners", value = "PLAINTEXT://localhost:0,EXTERNAL://localhost:0"), - new ClusterConfigProperty(key = "listener.security.protocol.map", value = "EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT"), - )) + @ClusterTemplate("zkClustersForAllMigrationVersions") def testDualWrite(zkCluster: ClusterInstance): Unit = { // Create a topic in ZK mode var admin = zkCluster.createAdminClient() @@ -334,7 +349,7 @@ class ZkMigrationIntegrationTest { val clusterId = zkCluster.clusterId() val kraftCluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). - setBootstrapMetadataVersion(MetadataVersion.IBP_3_4_IV0). + setBootstrapMetadataVersion(zkCluster.config().metadataVersion()). setClusterId(Uuid.fromString(clusterId)). setNumBrokerNodes(0). setNumControllerNodes(1).build()) diff --git a/core/src/test/scala/kafka/tools/GetOffsetShellParsingTest.scala b/core/src/test/scala/kafka/tools/GetOffsetShellParsingTest.scala deleted file mode 100644 index 889631be19332..0000000000000 --- a/core/src/test/scala/kafka/tools/GetOffsetShellParsingTest.scala +++ /dev/null @@ -1,264 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.tools - -import org.apache.kafka.common.TopicPartition -import org.junit.jupiter.api.Assertions.{assertFalse, assertThrows, assertTrue} -import org.junit.jupiter.api.Test - -class GetOffsetShellParsingTest { - - @Test - def testTopicPartitionFilterForTopicName(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList("test") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertFalse(topicPartitionFilter.isTopicAllowed("test1")) - assertFalse(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - } - - @Test - def testTopicPartitionFilterForInternalTopicName(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList("__consumer_offsets") - - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - assertFalse(topicPartitionFilter.isTopicAllowed("test1")) - assertFalse(topicPartitionFilter.isTopicAllowed("test2")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 1))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 0))) - } - - @Test - def testTopicPartitionFilterForTopicNameList(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList("test,test1,__consumer_offsets") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - assertFalse(topicPartitionFilter.isTopicAllowed("test2")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 0))) - } - - @Test - def testTopicPartitionFilterForRegex(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList("test.*") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("test2")) - assertFalse(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - } - - @Test - def testTopicPartitionFilterForPartitionIndexSpec(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":0") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - 
assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("test2")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 1))) - } - - @Test - def testTopicPartitionFilterForPartitionRangeSpec(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":1-3") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - assertTrue(topicPartitionFilter.isTopicAllowed("test2")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 2))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 2))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 3))) - } - - @Test - def testTopicPartitionFilterForPartitionLowerBoundSpec(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":1-") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("test2")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 2))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 2))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - } - - @Test - def testTopicPartitionFilterForPartitionUpperBoundSpec(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":-3") - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("test2")) - assertTrue(topicPartitionFilter.isTopicAllowed("test3")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test2", 2))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 2))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test3", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 3))) - } 
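These filter tests all exercise the rule grammar TOPIC_REGEX[:PARTITION_SPEC], where the partition spec is a single index, a lower-inclusive/upper-exclusive range, or an open-ended bound. The small Java sketch below parses a rule with the same regular expression the removed tool used; the class and method names are made up purely for illustration.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class TopicPartitionRuleSketch {
    // Same pattern as the removed Scala tool: optional topic regex, then an optional
    // ":" followed by either a single partition index or a "lower-upper" range.
    private static final Pattern RULE = Pattern.compile("([^:,]*)(?::(?:([0-9]*)|(?:([0-9]*)-([0-9]*))))?");

    static void describe(String ruleSpec) {
        Matcher m = RULE.matcher(ruleSpec);
        if (!m.matches()) {
            throw new IllegalArgumentException("Invalid rule specification: " + ruleSpec);
        }
        String topicRegex = (m.group(1) == null || m.group(1).isEmpty()) ? ".*" : m.group(1);
        if (m.group(2) != null && !m.group(2).isEmpty()) {
            System.out.println(ruleSpec + " -> topic regex '" + topicRegex + "', single partition " + m.group(2));
        } else {
            String lower = (m.group(3) == null || m.group(3).isEmpty()) ? "0" : m.group(3);
            String upper = (m.group(4) == null || m.group(4).isEmpty()) ? "Int.MaxValue" : m.group(4);
            System.out.println(ruleSpec + " -> topic regex '" + topicRegex + "', partitions [" + lower + ", " + upper + ")");
        }
    }

    public static void main(String[] args) {
        describe("test.*:0");               // single partition 0 of topics matching test.*
        describe("__consumer_offsets:1-2"); // partitions 1 (inclusive) to 2 (exclusive)
        describe(":3-");                    // partitions >= 3 of any topic
    }
}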
- - @Test - def testTopicPartitionFilterComplex(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList("test.*:0,__consumer_offsets:1-2,.*:3") - - assertTrue(topicPartitionFilter.isTopicAllowed("test")) - assertTrue(topicPartitionFilter.isTopicAllowed("test1")) - assertTrue(topicPartitionFilter.isTopicAllowed("custom")) - assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test1", 1))) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("custom", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("custom", 0))) - - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("__consumer_offsets", 2))) - } - - @Test - def testPartitionFilterForSingleIndex(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":1") - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 2))) - } - - @Test - def testPartitionFilterForRange(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":1-3") - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 2))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 4))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 5))) - } - - @Test - def testPartitionFilterForLowerBound(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":3-") - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 2))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 3))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 4))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 5))) - } - - @Test - def testPartitionFilterForUpperBound(): Unit = { - val topicPartitionFilter = GetOffsetShell.createTopicPartitionFilterWithPatternList(":-3") - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 0))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 1))) - assertTrue(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 2))) 
- assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 3))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 4))) - assertFalse(topicPartitionFilter.isTopicPartitionAllowed(topicPartition("test", 5))) - } - - @Test - def testPartitionsSetFilter(): Unit = { - val partitionsSetFilter = GetOffsetShell.createTopicPartitionFilterWithTopicAndPartitionPattern(Some("topic"), "1,3,5") - - assertFalse(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 0))) - assertFalse(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 2))) - assertFalse(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 4))) - - assertFalse(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic1", 1))) - assertFalse(partitionsSetFilter.isTopicAllowed("topic1")) - - assertTrue(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 1))) - assertTrue(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 3))) - assertTrue(partitionsSetFilter.isTopicPartitionAllowed(topicPartition("topic", 5))) - assertTrue(partitionsSetFilter.isTopicAllowed("topic")) - } - - @Test - def testPartitionFilterForInvalidSingleIndex(): Unit = { - assertThrows(classOf[IllegalArgumentException], - () => GetOffsetShell.createTopicPartitionFilterWithPatternList(":a")) - } - - @Test - def testPartitionFilterForInvalidRange(): Unit = { - assertThrows(classOf[IllegalArgumentException], - () => GetOffsetShell.createTopicPartitionFilterWithPatternList(":a-b")) - } - - @Test - def testPartitionFilterForInvalidLowerBound(): Unit = { - assertThrows(classOf[IllegalArgumentException], - () => GetOffsetShell.createTopicPartitionFilterWithPatternList(":a-")) - } - - @Test - def testPartitionFilterForInvalidUpperBound(): Unit = { - assertThrows(classOf[IllegalArgumentException], - () => GetOffsetShell.createTopicPartitionFilterWithPatternList(":-b")) - } - - @Test - def testInvalidTimeValue(): Unit = { - assertThrows(classOf[IllegalArgumentException], - () => GetOffsetShell.fetchOffsets(Array("--bootstrap-server", "localhost:9092", "--time", "invalid"))) - } - - private def topicPartition(topic: String, partition: Int): TopicPartition = { - new TopicPartition(topic, partition) - } -} diff --git a/core/src/test/scala/kafka/tools/GetOffsetShellTest.scala b/core/src/test/scala/kafka/tools/GetOffsetShellTest.scala deleted file mode 100644 index cbce573192748..0000000000000 --- a/core/src/test/scala/kafka/tools/GetOffsetShellTest.scala +++ /dev/null @@ -1,278 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.tools - -import java.util.Properties -import kafka.integration.KafkaServerTestHarness -import kafka.server.KafkaConfig -import kafka.utils.{Exit, Logging, TestUtils} -import org.apache.kafka.clients.CommonClientConfigs -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} -import org.apache.kafka.common.serialization.StringSerializer -import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource - -class GetOffsetShellTest extends KafkaServerTestHarness with Logging { - private val topicCount = 4 - private val offsetTopicPartitionCount = 4 - - override def generateConfigs: collection.Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1, zkConnect) - .map { p => - p.put(KafkaConfig.OffsetsTopicPartitionsProp, Int.box(offsetTopicPartitionCount)) - p - }.map(KafkaConfig.fromProps) - - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - super.setUp(testInfo) - Range(1, topicCount + 1).foreach(i => createTopic(topicName(i), i)) - - val props = new Properties() - props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer]) - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer]) - - // Send X messages to each partition of topicX - val producer = new KafkaProducer[String, String](props) - Range(1, topicCount + 1).foreach(i => Range(0, i*i) - .foreach(msgCount => producer.send(new ProducerRecord[String, String](topicName(i), msgCount % i, null, "val" + msgCount)))) - producer.close() - - TestUtils.createOffsetsTopic(zkClient, servers) - } - - @Test - def testNoFilterOptions(): Unit = { - val offsets = executeAndParse(Array()) - assertEquals(expectedOffsetsWithInternal(), offsets) - } - - @Test - def testInternalExcluded(): Unit = { - val offsets = executeAndParse(Array("--exclude-internal-topics")) - assertEquals(expectedTestTopicOffsets(), offsets) - } - - @Test - def testTopicNameArg(): Unit = { - Range(1, topicCount + 1).foreach(i => { - val offsets = executeAndParse(Array("--topic", topicName(i))) - assertEquals(expectedOffsetsForTopic(i), offsets, () => "Offset output did not match for " + topicName(i)) - }) - } - - @Test - def testTopicPatternArg(): Unit = { - val offsets = executeAndParse(Array("--topic", "topic.*")) - assertEquals(expectedTestTopicOffsets(), offsets) - } - - @Test - def testPartitionsArg(): Unit = { - val offsets = executeAndParse(Array("--partitions", "0,1")) - assertEquals(expectedOffsetsWithInternal().filter { case (_, partition, _) => partition <= 1 }, offsets) - } - - @Test - def testTopicPatternArgWithPartitionsArg(): Unit = { - val offsets = executeAndParse(Array("--topic", "topic.*", "--partitions", "0,1")) - assertEquals(expectedTestTopicOffsets().filter { case (_, partition, _) => partition <= 1 }, offsets) - } - - @Test - def testTopicPartitionsArg(): Unit = { - val offsets = executeAndParse(Array("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3")) - assertEquals( - List( - ("__consumer_offsets", 3, Some(0)), - ("topic1", 0, Some(1)), - ("topic2", 1, Some(2)), - ("topic3", 2, Some(3)), - ("topic4", 2, Some(4)) - ), - offsets - ) - } - - @ParameterizedTest - @ValueSource(strings = Array("-1", "latest")) - def testGetLatestOffsets(time: String): Unit = { - val offsets = 
executeAndParse(Array("--topic-partitions", "topic.*:0", "--time", time)) - assertEquals( - List( - ("topic1", 0, Some(1)), - ("topic2", 0, Some(2)), - ("topic3", 0, Some(3)), - ("topic4", 0, Some(4)) - ), - offsets - ) - } - - @ParameterizedTest - @ValueSource(strings = Array("-2", "earliest")) - def testGetEarliestOffsets(time: String): Unit = { - val offsets = executeAndParse(Array("--topic-partitions", "topic.*:0", "--time", time)) - assertEquals( - List( - ("topic1", 0, Some(0)), - ("topic2", 0, Some(0)), - ("topic3", 0, Some(0)), - ("topic4", 0, Some(0)) - ), - offsets - ) - } - - @ParameterizedTest - @ValueSource(strings = Array("-3", "max-timestamp")) - def testGetOffsetsByMaxTimestamp(time: String): Unit = { - val offsets = executeAndParse(Array("--topic-partitions", "topic.*", "--time", time)) - offsets.foreach { case (topic, _, timestampOpt) => - // We can't know the exact offsets with max timestamp - assertTrue(timestampOpt.get >= 0 && timestampOpt.get <= topic.replace("topic", "").toInt) - } - } - - @Test - def testGetOffsetsByTimestamp(): Unit = { - val time = (System.currentTimeMillis() / 2).toString - val offsets = executeAndParse(Array("--topic-partitions", "topic.*:0", "--time", time)) - assertEquals( - List( - ("topic1", 0, Some(0)), - ("topic2", 0, Some(0)), - ("topic3", 0, Some(0)), - ("topic4", 0, Some(0)) - ), - offsets - ) - } - - @Test - def testNoOffsetIfTimestampGreaterThanLatestRecord(): Unit = { - val time = (System.currentTimeMillis() * 2).toString - val offsets = executeAndParse(Array("--topic-partitions", "topic.*", "--time", time)) - assertEquals(List.empty, offsets) - } - - @Test - def testTopicPartitionsArgWithInternalExcluded(): Unit = { - val offsets = executeAndParse(Array("--topic-partitions", - "topic1:0,topic2:1,topic(3|4):2,__.*:3", "--exclude-internal-topics")) - assertEquals( - List( - ("topic1", 0, Some(1)), - ("topic2", 1, Some(2)), - ("topic3", 2, Some(3)), - ("topic4", 2, Some(4)) - ), - offsets - ) - } - - @Test - def testTopicPartitionsArgWithInternalIncluded(): Unit = { - val offsets = executeAndParse(Array("--topic-partitions", "__.*:0")) - assertEquals(List(("__consumer_offsets", 0, Some(0))), offsets) - } - - @Test - def testTopicPartitionsNotFoundForNonExistentTopic(): Unit = { - assertExitCodeIsOne(Array("--topic", "some_nonexistent_topic")) - } - - @Test - def testTopicPartitionsNotFoundForExcludedInternalTopic(): Unit = { - assertExitCodeIsOne(Array("--topic", "some_nonexistent_topic:*")) - } - - @Test - def testTopicPartitionsNotFoundForNonMatchingTopicPartitionPattern(): Unit = { - assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--exclude-internal-topics")) - } - - @Test - def testTopicPartitionsFlagWithTopicFlagCauseExit(): Unit = { - assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--topic", "topic1")) - } - - @Test - def testTopicPartitionsFlagWithPartitionsFlagCauseExit(): Unit = { - assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--partitions", "0")) - } - - private def expectedOffsetsWithInternal(): List[(String, Int, Option[Long])] = { - Range(0, offsetTopicPartitionCount).map(i => ("__consumer_offsets", i, Some(0L))).toList ++ expectedTestTopicOffsets() - } - - private def expectedTestTopicOffsets(): List[(String, Int, Option[Long])] = { - Range(1, topicCount + 1).flatMap(i => expectedOffsetsForTopic(i)).toList - } - - private def expectedOffsetsForTopic(i: Int): List[(String, Int, Option[Long])] = { - val name = topicName(i) - Range(0, i).map(p => (name, 
p, Some(i.toLong))).toList - } - - private def topicName(i: Int): String = "topic" + i - - private def assertExitCodeIsOne(args: Array[String]): Unit = { - var exitStatus: Option[Int] = None - Exit.setExitProcedure { (status, _) => - exitStatus = Some(status) - throw new RuntimeException - } - - try { - GetOffsetShell.main(addBootstrapServer(args)) - } catch { - case e: RuntimeException => - } finally { - Exit.resetExitProcedure() - } - - assertEquals(Some(1), exitStatus) - } - - private def executeAndParse(args: Array[String]): List[(String, Int, Option[Long])] = { - val output = executeAndGrabOutput(args) - output.split(System.lineSeparator()) - .map(_.split(":")) - .filter(_.length >= 2) - .map { line => - val topic = line(0) - val partition = line(1).toInt - val timestamp = if (line.length == 2 || line(2).isEmpty) None else Some(line(2).toLong) - (topic, partition, timestamp) - } - .toList - } - - private def executeAndGrabOutput(args: Array[String]): String = { - TestUtils.grabConsoleOutput(GetOffsetShell.main(addBootstrapServer(args))) - } - - private def addBootstrapServer(args: Array[String]): Array[String] = { - args ++ Array("--bootstrap-server", bootstrapServers()) - } -} - - diff --git a/core/src/test/scala/unit/kafka/admin/ReassignPartitionsCommandArgsTest.scala b/core/src/test/scala/unit/kafka/admin/ReassignPartitionsCommandArgsTest.scala deleted file mode 100644 index 98d54629f2081..0000000000000 --- a/core/src/test/scala/unit/kafka/admin/ReassignPartitionsCommandArgsTest.scala +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package kafka.admin - -import kafka.utils.Exit -import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, Timeout} - -@Timeout(60) -class ReassignPartitionsCommandArgsTest { - - val missingBootstrapServerMsg = "Please specify --bootstrap-server" - - @BeforeEach - def setUp(): Unit = { - Exit.setExitProcedure((_, message) => throw new IllegalArgumentException(message.orNull)) - } - - @AfterEach - def tearDown(): Unit = { - Exit.resetExitProcedure() - } - - ///// Test valid argument parsing - @Test - def shouldCorrectlyParseValidMinimumGenerateOptions(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--generate", - "--broker-list", "101,102", - "--topics-to-move-json-file", "myfile.json") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - @Test - def shouldCorrectlyParseValidMinimumExecuteOptions(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - "--reassignment-json-file", "myfile.json") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - @Test - def shouldCorrectlyParseValidMinimumVerifyOptions(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--verify", - "--reassignment-json-file", "myfile.json") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - @Test - def shouldAllowThrottleOptionOnExecute(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - "--throttle", "100", - "--reassignment-json-file", "myfile.json") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - @Test - def shouldUseDefaultsIfEnabled(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - "--reassignment-json-file", "myfile.json") - val opts = ReassignPartitionsCommand.validateAndParseArgs(args) - assertEquals(10000L, opts.options.valueOf(opts.timeoutOpt)) - assertEquals(-1L, opts.options.valueOf(opts.interBrokerThrottleOpt)) - } - - @Test - def testList(): Unit = { - val args = Array( - "--list", - "--bootstrap-server", "localhost:1234") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - @Test - def testCancelWithPreserveThrottlesOption(): Unit = { - val args = Array( - "--cancel", - "--bootstrap-server", "localhost:1234", - "--reassignment-json-file", "myfile.json", - "--preserve-throttles") - ReassignPartitionsCommand.validateAndParseArgs(args) - } - - ///// Test handling missing or invalid actions - @Test - def shouldFailIfNoArgs(): Unit = { - val args: Array[String]= Array() - shouldFailWith(ReassignPartitionsCommand.helpText, args) - } - - @Test - def shouldFailIfBlankArg(): Unit = { - val args = Array(" ") - shouldFailWith("Command must include exactly one action", args) - } - - @Test - def shouldFailIfMultipleActions(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - "--verify", - "--reassignment-json-file", "myfile.json" - ) - shouldFailWith("Command must include exactly one action", args) - } - - ///// Test --execute - @Test - def shouldNotAllowExecuteWithTopicsOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - "--reassignment-json-file", "myfile.json", - "--topics-to-move-json-file", "myfile.json") - shouldFailWith("Option \"[topics-to-move-json-file]\" can't be used with action \"[execute]\"", args) - } - - @Test - def shouldNotAllowExecuteWithBrokerList(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute", - 
"--reassignment-json-file", "myfile.json", - "--broker-list", "101,102" - ) - shouldFailWith("Option \"[broker-list]\" can't be used with action \"[execute]\"", args) - } - - @Test - def shouldNotAllowExecuteWithoutReassignmentOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--execute") - shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args) - } - - @Test - def testMissingBootstrapServerArgumentForExecute(): Unit = { - val args = Array( - "--execute") - shouldFailWith(missingBootstrapServerMsg, args) - } - - ///// Test --generate - @Test - def shouldNotAllowGenerateWithoutBrokersAndTopicsOptions(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--generate") - shouldFailWith("Missing required argument \"[topics-to-move-json-file]\"", args) - } - - @Test - def shouldNotAllowGenerateWithoutBrokersOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--topics-to-move-json-file", "myfile.json", - "--generate") - shouldFailWith("Missing required argument \"[broker-list]\"", args) - } - - @Test - def shouldNotAllowGenerateWithoutTopicsOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--broker-list", "101,102", - "--generate") - shouldFailWith("Missing required argument \"[topics-to-move-json-file]\"", args) - } - - @Test - def shouldNotAllowGenerateWithThrottleOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--generate", - "--broker-list", "101,102", - "--throttle", "100", - "--topics-to-move-json-file", "myfile.json") - shouldFailWith("Option \"[throttle]\" can't be used with action \"[generate]\"", args) - } - - @Test - def shouldNotAllowGenerateWithReassignmentOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--generate", - "--broker-list", "101,102", - "--topics-to-move-json-file", "myfile.json", - "--reassignment-json-file", "myfile.json") - shouldFailWith("Option \"[reassignment-json-file]\" can't be used with action \"[generate]\"", args) - } - - @Test - def shouldPrintHelpTextIfHelpArg(): Unit = { - val args: Array[String]= Array("--help") - // note, this is not actually a failed case, it's just we share the same `printUsageAndExit` method when wrong arg received - shouldFailWith(ReassignPartitionsCommand.helpText, args) - } - - ///// Test --verify - @Test - def shouldNotAllowVerifyWithoutReassignmentOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--verify") - shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args) - } - - @Test - def shouldNotAllowBrokersListWithVerifyOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--verify", - "--broker-list", "100,101", - "--reassignment-json-file", "myfile.json") - shouldFailWith("Option \"[broker-list]\" can't be used with action \"[verify]\"", args) - } - - @Test - def shouldNotAllowThrottleWithVerifyOption(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--verify", - "--throttle", "100", - "--reassignment-json-file", "myfile.json") - shouldFailWith("Option \"[throttle]\" can't be used with action \"[verify]\"", args) - } - - @Test - def shouldNotAllowTopicsOptionWithVerify(): Unit = { - val args = Array( - "--bootstrap-server", "localhost:1234", - "--verify", - "--reassignment-json-file", "myfile.json", - "--topics-to-move-json-file", "myfile.json") - shouldFailWith("Option \"[topics-to-move-json-file]\" can't 
be used with action \"[verify]\"", args) - } - - def shouldFailWith(msg: String, args: Array[String]): Unit = { - val e = assertThrows(classOf[Exception], () => ReassignPartitionsCommand.validateAndParseArgs(args), - () => s"Should have failed with [$msg] but no failure occurred.") - assertTrue(e.getMessage.startsWith(msg), s"Expected exception with message:\n[$msg]\nbut was\n[${e.getMessage}]") - } - - ///// Test --cancel - @Test - def shouldNotAllowCancelWithoutBootstrapServerOption(): Unit = { - val args = Array( - "--cancel") - shouldFailWith(missingBootstrapServerMsg, args) - } - - @Test - def shouldNotAllowCancelWithoutReassignmentJsonFile(): Unit = { - val args = Array( - "--cancel", - "--bootstrap-server", "localhost:1234", - "--preserve-throttles") - shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args) - } -} diff --git a/core/src/test/scala/unit/kafka/migration/MigrationPropagatorTest.scala b/core/src/test/scala/unit/kafka/migration/MigrationPropagatorTest.scala index b7cdb57cc887e..89fdd9b6586bc 100644 --- a/core/src/test/scala/unit/kafka/migration/MigrationPropagatorTest.scala +++ b/core/src/test/scala/unit/kafka/migration/MigrationPropagatorTest.scala @@ -20,10 +20,11 @@ package kafka.migration import kafka.cluster.Broker import org.apache.kafka.common.metadata.RegisterBrokerRecord import org.apache.kafka.image.ClusterImage -import org.apache.kafka.metadata.BrokerRegistration +import org.apache.kafka.metadata.{BrokerRegistration, ControllerRegistration} import org.junit.jupiter.api.Assertions.{assertFalse, assertTrue} import org.junit.jupiter.api.Test +import java.util.Collections import scala.jdk.CollectionConverters._ class MigrationPropagatorTest { @@ -39,7 +40,7 @@ class MigrationPropagatorTest { def brokersToClusterImage(brokers: Seq[BrokerRegistration]): ClusterImage = { val brokerMap = brokers.map(broker => Integer.valueOf(broker.id()) -> broker).toMap.asJava - new ClusterImage(brokerMap) + new ClusterImage(brokerMap, Collections.emptyMap[Integer, ControllerRegistration]) } @Test diff --git a/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala b/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala index 7f229e93be937..80884da849bb5 100644 --- a/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala +++ b/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala @@ -17,12 +17,17 @@ package kafka.server +import kafka.network.RequestChannel.Request +import org.apache.kafka.clients.admin.EndpointType + import java.net.InetAddress import java.util import org.apache.kafka.common.acl.AclOperation +import org.apache.kafka.common.message.{DescribeClusterRequestData, DescribeClusterResponseData} +import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection import org.apache.kafka.common.network.{ClientInformation, ListenerName} -import org.apache.kafka.common.protocol.ApiKeys -import org.apache.kafka.common.requests.{RequestContext, RequestHeader} +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.{DescribeClusterRequest, RequestContext, RequestHeader} import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer} @@ -35,7 +40,31 @@ import org.mockito.Mockito.{mock, verify, when} import scala.collection.Seq import scala.jdk.CollectionConverters._ +object AuthHelperTest 
{ + def newMockDescribeClusterRequest( + data: DescribeClusterRequestData, + requestVersion: Int + ): Request = { + val requestContext = new RequestContext( + new RequestHeader(ApiKeys.DESCRIBE_CLUSTER, requestVersion.toShort, "", 0), + "", + InetAddress.getLocalHost, + KafkaPrincipal.ANONYMOUS, + new ListenerName("PLAINTEXT"), + SecurityProtocol.PLAINTEXT, + ClientInformation.EMPTY, + false) + val request: Request = mock(classOf[Request]) + when(request.body[DescribeClusterRequest]).thenReturn( + new DescribeClusterRequest(data, requestVersion.toShort)) + when(request.context).thenReturn(requestContext) + when(request.header).thenReturn(requestContext.header) + request + } +} + class AuthHelperTest { + import AuthHelperTest.newMockDescribeClusterRequest private val clientId = "" @@ -118,4 +147,99 @@ class AuthHelperTest { assertEquals(Set(resourceName1, resourceName3), result) } + @Test + def testComputeDescribeClusterResponseV1WithUnknownEndpointType(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(123.toByte), 1) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.BROKER, + "ltCWoi9wRhmHSQCIgAznEg", + () => new DescribeClusterBrokerCollection(), + () => 1) + assertEquals(new DescribeClusterResponseData(). + setErrorCode(Errors.UNSUPPORTED_ENDPOINT_TYPE.code()). + setErrorMessage("Unsupported endpoint type 123"), responseData) + } + + @Test + def testComputeDescribeClusterResponseV0WithUnknownEndpointType(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(123.toByte), 0) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.BROKER, + "ltCWoi9wRhmHSQCIgAznEg", + () => new DescribeClusterBrokerCollection(), + () => 1) + assertEquals(new DescribeClusterResponseData(). + setErrorCode(Errors.INVALID_REQUEST.code()). + setErrorMessage("Unsupported endpoint type 123"), responseData) + } + + @Test + def testComputeDescribeClusterResponseV1WithUnexpectedEndpointType(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(EndpointType.BROKER.id()), 1) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.CONTROLLER, + "ltCWoi9wRhmHSQCIgAznEg", + () => new DescribeClusterBrokerCollection(), + () => 1) + assertEquals(new DescribeClusterResponseData(). + setErrorCode(Errors.MISMATCHED_ENDPOINT_TYPE.code()). + setErrorMessage("The request was sent to an endpoint of type CONTROLLER, but we wanted an endpoint of type BROKER"), responseData) + } + + @Test + def testComputeDescribeClusterResponseV0WithUnexpectedEndpointType(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(EndpointType.BROKER.id()), 0) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.CONTROLLER, + "ltCWoi9wRhmHSQCIgAznEg", + () => new DescribeClusterBrokerCollection(), + () => 1) + assertEquals(new DescribeClusterResponseData(). + setErrorCode(Errors.INVALID_REQUEST.code()). 
+ setErrorMessage("The request was sent to an endpoint of type CONTROLLER, but we wanted an endpoint of type BROKER"), responseData) + } + + @Test + def testComputeDescribeClusterResponseWhereControllerIsNotFound(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(EndpointType.CONTROLLER.id()), 1) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.CONTROLLER, + "ltCWoi9wRhmHSQCIgAznEg", + () => new DescribeClusterBrokerCollection(), + () => 1) + assertEquals(new DescribeClusterResponseData(). + setClusterId("ltCWoi9wRhmHSQCIgAznEg"). + setControllerId(-1). + setClusterAuthorizedOperations(Int.MinValue), responseData) + } + + @Test + def testComputeDescribeClusterResponseSuccess(): Unit = { + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) + val request = newMockDescribeClusterRequest( + new DescribeClusterRequestData().setEndpointType(EndpointType.CONTROLLER.id()), 1) + val nodes = new DescribeClusterBrokerCollection( + java.util.Arrays.asList[DescribeClusterResponseData.DescribeClusterBroker]( + new DescribeClusterResponseData.DescribeClusterBroker().setBrokerId(1)).iterator()) + val responseData = authHelper.computeDescribeClusterResponse(request, + EndpointType.CONTROLLER, + "ltCWoi9wRhmHSQCIgAznEg", + () => nodes, + () => 1) + assertEquals(new DescribeClusterResponseData(). + setClusterId("ltCWoi9wRhmHSQCIgAznEg"). + setControllerId(1). + setClusterAuthorizedOperations(Int.MinValue). + setBrokers(nodes), responseData) + } } diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index d0c5ec8772f95..588508ae3d023 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -18,28 +18,15 @@ package kafka.server import java.util.{Collections, Properties} -import java.util.concurrent.atomic.{AtomicLong, AtomicReference} import kafka.utils.TestUtils -import org.apache.kafka.clients.{Metadata, MockClient, NodeApiVersions} -import org.apache.kafka.common.config.SaslConfigs import org.apache.kafka.common.Node -import org.apache.kafka.common.internals.ClusterResourceListeners -import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion -import org.apache.kafka.common.message.BrokerRegistrationRequestData.{Listener, ListenerCollection} import org.apache.kafka.common.message.{BrokerHeartbeatResponseData, BrokerRegistrationResponseData} -import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.ApiKeys.{BROKER_HEARTBEAT, BROKER_REGISTRATION} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AbstractRequest, BrokerHeartbeatRequest, BrokerHeartbeatResponse, BrokerRegistrationResponse} -import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.common.utils.LogContext import org.apache.kafka.metadata.BrokerState -import org.apache.kafka.server.util.MockTime import org.junit.jupiter.api.{Test, Timeout} import org.junit.jupiter.api.Assertions._ -import scala.jdk.CollectionConverters._ - @Timeout(value = 12) class BrokerLifecycleManagerTest { @@ -54,57 +41,16 @@ class BrokerLifecycleManagerTest { properties } - class SimpleControllerNodeProvider extends ControllerNodeProvider { - val node = new 
AtomicReference[Node](null) - - def listenerName: ListenerName = new ListenerName("PLAINTEXT") - - def securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT - - def saslMechanism: String = SaslConfigs.DEFAULT_SASL_MECHANISM - - override def getControllerInfo(): ControllerInformation = ControllerInformation(Option(node.get()), - listenerName, securityProtocol, saslMechanism, isZkController = false) - } - - class BrokerLifecycleManagerTestContext(properties: Properties) { - val config = new KafkaConfig(properties) - val time = new MockTime(1, 1) - val highestMetadataOffset = new AtomicLong(0) - val metadata = new Metadata(1000, 1000, 1000, new LogContext(), new ClusterResourceListeners()) - val mockClient = new MockClient(time, metadata) - val controllerNodeProvider = new SimpleControllerNodeProvider() - val nodeApiVersions = NodeApiVersions.create(Seq(BROKER_REGISTRATION, BROKER_HEARTBEAT).map { - apiKey => new ApiVersion().setApiKey(apiKey.id). - setMinVersion(apiKey.oldestVersion()).setMaxVersion(apiKey.latestVersion()) - }.toList.asJava) - val mockChannelManager = new MockBrokerToControllerChannelManager(mockClient, - time, controllerNodeProvider, nodeApiVersions) - val clusterId = "x4AJGXQSRnephtTZzujw4w" - val advertisedListeners = new ListenerCollection() - config.effectiveAdvertisedListeners.foreach { ep => - advertisedListeners.add(new Listener().setHost(ep.host). - setName(ep.listenerName.value()). - setPort(ep.port.shortValue()). - setSecurityProtocol(ep.securityProtocol.id)) - } - - def poll(): Unit = { - mockClient.wakeup() - mockChannelManager.poll() - } - } - @Test def testCreateAndClose(): Unit = { - val context = new BrokerLifecycleManagerTestContext(configProperties) + val context = new RegistrationTestContext(configProperties) val manager = new BrokerLifecycleManager(context.config, context.time, "create-and-close-", isZkBroker = false) manager.close() } @Test def testCreateStartAndClose(): Unit = { - val context = new BrokerLifecycleManagerTestContext(configProperties) + val context = new RegistrationTestContext(configProperties) val manager = new BrokerLifecycleManager(context.config, context.time, "create-start-and-close-", isZkBroker = false) assertEquals(BrokerState.NOT_RUNNING, manager.state) manager.start(() => context.highestMetadataOffset.get(), @@ -119,7 +65,7 @@ class BrokerLifecycleManagerTest { @Test def testSuccessfulRegistration(): Unit = { - val context = new BrokerLifecycleManagerTestContext(configProperties) + val context = new RegistrationTestContext(configProperties) val manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", isZkBroker = false) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) @@ -138,7 +84,7 @@ class BrokerLifecycleManagerTest { @Test def testRegistrationTimeout(): Unit = { - val context = new BrokerLifecycleManagerTestContext(configProperties) + val context = new RegistrationTestContext(configProperties) val controllerNode = new Node(3000, "localhost", 8021) val manager = new BrokerLifecycleManager(context.config, context.time, "registration-timeout-", isZkBroker = false) context.controllerNodeProvider.node.set(controllerNode) @@ -180,7 +126,7 @@ class BrokerLifecycleManagerTest { @Test def testControlledShutdown(): Unit = { - val context = new BrokerLifecycleManagerTestContext(configProperties) + val context = new RegistrationTestContext(configProperties) val manager = new BrokerLifecycleManager(context.config, context.time, 
"controlled-shutdown-", isZkBroker = false) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) diff --git a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala index bd2a306719b47..58eba87326ecd 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala @@ -46,10 +46,11 @@ import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors} import org.apache.kafka.common.requests._ import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourceType} import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} -import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.MockTime import org.apache.kafka.common.{ElectionType, Uuid} import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.apache.kafka.controller.{Controller, ControllerRequestContext, ResultOrError} +import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult, Authorizer} import org.apache.kafka.server.common.{ApiMessageAndVersion, Features, MetadataVersion, ProducerIdsBlock} import org.junit.jupiter.api.Assertions._ @@ -154,7 +155,7 @@ class ControllerApisTest { raftManager, new KafkaConfig(props), MetaProperties("JgxuGe9URy-E-ceaL04lEw", nodeId = nodeId), - Seq.empty, + new ControllerRegistrationsPublisher(), new SimpleApiVersionManager( ListenerType.CONTROLLER, true, @@ -1138,6 +1139,22 @@ class ControllerApisTest { assertEquals(1, errorResponse.errorCounts().getOrDefault(Errors.UNSUPPORTED_VERSION, 0)) } + @Test + def testUnauthorizedControllerRegistrationRequest(): Unit = { + assertThrows(classOf[ClusterAuthorizationException], () => createControllerApis( + Some(createDenyAllAuthorizer()), new MockController.Builder().build()). + handleControllerRegistration(buildRequest( + new ControllerRegistrationRequest(new ControllerRegistrationRequestData(), 0.toShort)))) + } + + @Test + def testUnauthorizedDescribeClusterRequest(): Unit = { + assertThrows(classOf[ClusterAuthorizationException], () => createControllerApis( + Some(createDenyAllAuthorizer()), new MockController.Builder().build()). + handleDescribeCluster(buildRequest( + new DescribeClusterRequest(new DescribeClusterRequestData(), 1.toShort)))) + } + @AfterEach def tearDown(): Unit = { quotasNeverThrottleControllerMutations.shutdown() diff --git a/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala new file mode 100644 index 0000000000000..f6a49bfbe26b9 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala @@ -0,0 +1,268 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +package kafka.server + +import org.apache.kafka.common.{Node, Uuid} +import org.apache.kafka.common.message.ControllerRegistrationResponseData +import org.apache.kafka.common.metadata.{FeatureLevelRecord, RegisterControllerRecord} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.ControllerRegistrationResponse +import org.apache.kafka.common.utils.{ExponentialBackoff, Time} +import org.apache.kafka.image.loader.{LogDeltaManifest, SnapshotManifest} +import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} +import org.apache.kafka.metadata.{RecordTestUtils, VersionRange} +import org.apache.kafka.raft.LeaderAndEpoch +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.test.TestUtils +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} +import org.junit.jupiter.api.{Test, Timeout} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +import java.util +import java.util.{OptionalInt, Properties} +import java.util.concurrent.{CompletableFuture, TimeUnit} + +@Timeout(value = 60) +class ControllerRegistrationManagerTest { + private val controller1 = new Node(1, "localhost", 7000) + + private def configProperties = { + val properties = new Properties() + properties.setProperty(KafkaConfig.LogDirsProp, "/tmp/foo") + properties.setProperty(KafkaConfig.ProcessRolesProp, "controller") + properties.setProperty(KafkaConfig.ListenerSecurityProtocolMapProp, s"CONTROLLER:PLAINTEXT") + properties.setProperty(KafkaConfig.ListenersProp, s"CONTROLLER://localhost:0") + properties.setProperty(KafkaConfig.ControllerListenerNamesProp, "CONTROLLER") + properties.setProperty(KafkaConfig.NodeIdProp, "1") + properties.setProperty(KafkaConfig.QuorumVotersProp, s"1@localhost:8000,2@localhost:5000,3@localhost:7000") + properties + } + + private def createSupportedFeatures( + highestSupportedMetadataVersion: MetadataVersion + ): java.util.Map[String, VersionRange] = { + val results = new util.HashMap[String, VersionRange]() + results.put(MetadataVersion.FEATURE_NAME, VersionRange.of( + MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), + highestSupportedMetadataVersion.featureLevel())) + results + } + + private def newControllerRegistrationManager( + context: RegistrationTestContext, + ): ControllerRegistrationManager = { + new ControllerRegistrationManager(context.config, + context.clusterId, + Time.SYSTEM, + "controller-registration-manager-test-", + createSupportedFeatures(MetadataVersion.IBP_3_7_IV0), + RecordTestUtils.createTestControllerRegistration(1, false).incarnationId(), + Map(), + new ExponentialBackoff(1, 2, 100, 0.02)) + } + + private def registeredInLog(manager: ControllerRegistrationManager): Boolean = { + val registeredInLog = new CompletableFuture[Boolean] + manager.eventQueue.append(() => { + registeredInLog.complete(manager.registeredInLog) + }) + registeredInLog.get(30, TimeUnit.SECONDS) + } + + private def rpcStats(manager: ControllerRegistrationManager): (Boolean, Long, Long) = { + val failedAttempts = new 
CompletableFuture[(Boolean, Long, Long)] + manager.eventQueue.append(() => { + failedAttempts.complete((manager.pendingRpc, manager.successfulRpcs, manager.failedRpcs)) + }) + failedAttempts.get(30, TimeUnit.SECONDS) + } + + private def doMetadataUpdate( + prevImage: MetadataImage, + manager: ControllerRegistrationManager, + metadataVersion: MetadataVersion, + registrationModifier: RegisterControllerRecord => Option[RegisterControllerRecord] + ): MetadataImage = { + val delta = new MetadataDelta.Builder(). + setImage(prevImage). + build() + if (!prevImage.features().metadataVersion().equals(metadataVersion)) { + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(metadataVersion.featureLevel())) + } + if (metadataVersion.isControllerRegistrationSupported) { + for (i <- Seq(1, 2, 3)) { + registrationModifier(RecordTestUtils.createTestControllerRegistration(i, false)).foreach { + registration => delta.replay(registration) + } + } + } + val provenance = new MetadataProvenance(100, 200, 300) + val newImage = delta.apply(provenance) + val manifest = if (!prevImage.features().metadataVersion().equals(metadataVersion)) { + new SnapshotManifest(provenance, 1000) + } else { + new LogDeltaManifest.Builder(). + provenance(provenance). + leaderAndEpoch(new LeaderAndEpoch(OptionalInt.of(1), 100)). + numBatches(1). + elapsedNs(100). + numBytes(200). + build(); + } + manager.onMetadataUpdate(delta, newImage, manifest) + newImage + } + + @Test + def testCreateAndClose(): Unit = { + val context = new RegistrationTestContext(configProperties) + val manager = newControllerRegistrationManager(context) + assertFalse(registeredInLog(manager)) + assertEquals((false, 0, 0), rpcStats(manager)) + manager.close() + } + + @Test + def testCreateStartAndClose(): Unit = { + val context = new RegistrationTestContext(configProperties) + val manager = newControllerRegistrationManager(context) + try { + manager.start(context.mockChannelManager) + assertFalse(registeredInLog(manager)) + assertEquals((false, 0, 0), rpcStats(manager)) + } finally { + manager.close() + } + } + + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testRegistration(metadataVersionSupportsRegistration: Boolean): Unit = { + val context = new RegistrationTestContext(configProperties) + val metadataVersion = if (metadataVersionSupportsRegistration) { + MetadataVersion.IBP_3_7_IV0 + } else { + MetadataVersion.IBP_3_6_IV0 + } + val manager = newControllerRegistrationManager(context) + try { + if (!metadataVersionSupportsRegistration) { + context.mockClient.prepareUnsupportedVersionResponse(_ => true) + } else { + context.controllerNodeProvider.node.set(controller1) + } + manager.start(context.mockChannelManager) + assertFalse(registeredInLog(manager)) + assertEquals((false, 0, 0), rpcStats(manager)) + val image = doMetadataUpdate(MetadataImage.EMPTY, + manager, + metadataVersion, + r => if (r.controllerId() == 1) None else Some(r)) + if (!metadataVersionSupportsRegistration) { + assertFalse(registeredInLog(manager)) + assertEquals((false, 0, 0), rpcStats(manager)) + } else { + TestUtils.retryOnExceptionWithTimeout(30000, () => { + assertEquals((true, 0, 0), rpcStats(manager)) + }) + context.mockClient.prepareResponseFrom(new ControllerRegistrationResponse( + new ControllerRegistrationResponseData()), controller1) + TestUtils.retryOnExceptionWithTimeout(30000, () => { + context.mockChannelManager.poll() + assertEquals((false, 1, 0), rpcStats(manager)) + }) + 
assertFalse(registeredInLog(manager)) + doMetadataUpdate(image, + manager, + metadataVersion, + r => Some(r)) + assertTrue(registeredInLog(manager)) + } + } finally { + manager.close() + } + } + + @Test + def testWrongIncarnationId(): Unit = { + val context = new RegistrationTestContext(configProperties) + val manager = newControllerRegistrationManager(context) + try { + // We try to send an RPC, because the incarnation ID is wrong. + context.controllerNodeProvider.node.set(controller1) + doMetadataUpdate(MetadataImage.EMPTY, + manager, + MetadataVersion.IBP_3_7_IV0, + r => Some(r.setIncarnationId(new Uuid(456, r.controllerId())))) + manager.start(context.mockChannelManager) + TestUtils.retryOnExceptionWithTimeout(30000, () => { + context.mockChannelManager.poll() + assertEquals((true, 0, 0), rpcStats(manager)) + }) + + // Complete the RPC. + context.mockClient.prepareResponseFrom(new ControllerRegistrationResponse( + new ControllerRegistrationResponseData()), controller1) + TestUtils.retryOnExceptionWithTimeout(30000, () => { + context.mockChannelManager.poll() + assertEquals((false, 1, 0), rpcStats(manager)) + }) + + // If the incarnation ID is still wrong, we'll resend again. + doMetadataUpdate(MetadataImage.EMPTY, + manager, + MetadataVersion.IBP_3_7_IV0, + r => Some(r.setIncarnationId(new Uuid(457, r.controllerId())))) + TestUtils.retryOnExceptionWithTimeout(30000, () => { + context.mockChannelManager.poll() + assertEquals((true, 1, 0), rpcStats(manager)) + }) + } finally { + manager.close() + } + } + + @Test + def testRetransmitRegistration(): Unit = { + val context = new RegistrationTestContext(configProperties) + val manager = newControllerRegistrationManager(context) + try { + context.controllerNodeProvider.node.set(controller1) + manager.start(context.mockChannelManager) + context.mockClient.prepareResponseFrom(new ControllerRegistrationResponse( + new ControllerRegistrationResponseData(). + setErrorCode(Errors.UNKNOWN_CONTROLLER_ID.code()). 
+ setErrorMessage("Unknown controller 1")), controller1) + context.mockClient.prepareResponseFrom(new ControllerRegistrationResponse( + new ControllerRegistrationResponseData()), controller1) + doMetadataUpdate(MetadataImage.EMPTY, + manager, + MetadataVersion.IBP_3_7_IV0, + r => if (r.controllerId() == 1) None else Some(r)) + TestUtils.retryOnExceptionWithTimeout(30000, () => { + context.mockChannelManager.poll() + assertEquals((false, 1, 0), rpcStats(manager)) + }) + } finally { + manager.close() + } + } +} diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index 50ba898b25ce7..7b398622cc38a 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -48,6 +48,7 @@ import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterableConfigC import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterableConfig => LAlterableConfig} import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => LAlterConfigsResourceResponse} import org.apache.kafka.common.message.ApiMessageType.ListenerType +import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.DescribedGroup import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicCollection} import org.apache.kafka.common.message.DescribeConfigsResponseData.DescribeConfigsResult import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.{AlterConfigsResource => IAlterConfigsResource} @@ -6210,4 +6211,21 @@ class KafkaApisTest { val response = verifyNoThrottling[ConsumerGroupHeartbeatResponse](requestChannelRequest) assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) } + + @Test + def testConsumerGroupDescribeReturnsUnsupportedVersion(): Unit = { + val groupId = "group0" + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + consumerGroupDescribeRequestData.groupIds.add(groupId) + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) + val errorCode = Errors.UNSUPPORTED_VERSION.code + val expectedDescribedGroup = new DescribedGroup().setGroupId(groupId).setErrorCode(errorCode) + val expectedResponse = new ConsumerGroupDescribeResponseData() + expectedResponse.groups.add(expectedDescribedGroup) + + createKafkaApis().handle(requestChannelRequest, RequestLocal.NoCaching) + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) + + assertEquals(expectedResponse, response.data) + } } diff --git a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala new file mode 100644 index 0000000000000..2f5b705db511d --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import org.apache.kafka.clients.{Metadata, MockClient, NodeApiVersions} +import org.apache.kafka.common.config.SaslConfigs +import org.apache.kafka.common.Node +import org.apache.kafka.common.internals.ClusterResourceListeners +import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion +import org.apache.kafka.common.message.BrokerRegistrationRequestData.{Listener, ListenerCollection} +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.protocol.ApiKeys.{BROKER_HEARTBEAT, BROKER_REGISTRATION, CONTROLLER_REGISTRATION} +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.utils.LogContext +import org.apache.kafka.server.util.MockTime + +import java.util.Properties +import java.util.concurrent.atomic.{AtomicInteger, AtomicLong, AtomicReference} +import scala.jdk.CollectionConverters._ + +class SimpleControllerNodeProvider extends ControllerNodeProvider { + val node = new AtomicReference[Node](null) + + def listenerName: ListenerName = new ListenerName("PLAINTEXT") + + def securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT + + def saslMechanism: String = SaslConfigs.DEFAULT_SASL_MECHANISM + + override def getControllerInfo(): ControllerInformation = ControllerInformation(Option(node.get()), + listenerName, securityProtocol, saslMechanism, isZkController = false) +} + +class RegistrationTestContext( + properties: Properties +) { + val config = new KafkaConfig(properties) + val time = new MockTime(1, 1) + val highestMetadataOffset = new AtomicLong(0) + val metadata = new Metadata(1000, 1000, 1000, new LogContext(), new ClusterResourceListeners()) + val mockClient = new MockClient(time, metadata) + val controllerNodeProvider = new SimpleControllerNodeProvider() + val nodeApiVersions = NodeApiVersions.create(Seq(BROKER_REGISTRATION, BROKER_HEARTBEAT, CONTROLLER_REGISTRATION).map { + apiKey => new ApiVersion().setApiKey(apiKey.id). + setMinVersion(apiKey.oldestVersion()).setMaxVersion(apiKey.latestVersion()) + }.toList.asJava) + val mockChannelManager = new MockBrokerToControllerChannelManager(mockClient, + time, controllerNodeProvider, nodeApiVersions) + val clusterId = "x4AJGXQSRnephtTZzujw4w" + val advertisedListeners = new ListenerCollection() + val controllerEpoch = new AtomicInteger(123) + config.effectiveAdvertisedListeners.foreach { ep => + advertisedListeners.add(new Listener().setHost(ep.host). + setName(ep.listenerName.value()). + setPort(ep.port.shortValue()). 
+ setSecurityProtocol(ep.securityProtocol.id)) + } + + def poll(): Unit = { + mockClient.wakeup() + mockChannelManager.poll() + } +} diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index 6e09a3a90c125..1943f22563bc8 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -710,6 +710,9 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.CONSUMER_GROUP_HEARTBEAT => new ConsumerGroupHeartbeatRequest.Builder(new ConsumerGroupHeartbeatRequestData(), true) + case ApiKeys.CONSUMER_GROUP_DESCRIBE => + new ConsumerGroupDescribeRequest.Builder(new ConsumerGroupDescribeRequestData(), true) + case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) } diff --git a/docs/design.html b/docs/design.html index 139eb0c1e3b00..a2edc552413fb 100644 --- a/docs/design.html +++ b/docs/design.html @@ -136,8 +136,9 @@

- Kafka supports this with an efficient batching format. A batch of messages can be clumped together compressed and sent to the server in this form. This batch of messages will be written in compressed form and will - remain compressed in the log and will only be decompressed by the consumer. + Kafka supports this with an efficient batching format. A batch of messages can be grouped together, compressed, and sent to the server in this form. The broker decompresses the batch in order to validate it. For + example, it validates that the number of records in the batch is the same as what the batch header states. This batch of messages is then written to disk in compressed form. The batch will remain compressed in the log and it will also be transmitted to the + consumer in compressed form. The consumer decompresses any compressed data that it receives.
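As a minimal sketch of the batching and compression behaviour described above (not part of this patch): the codec is chosen on the producer via the compression.type config, and the compressed batches then flow through the broker and to the consumer as described. The broker address and topic name below are placeholders.

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class CompressedProducerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Placeholder broker address for this sketch.
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            // The producer compresses whole batches; valid values include gzip, snappy, lz4 and zstd.
            props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "zstd");
            // Larger batches generally compress better; linger.ms gives the producer time to fill them.
            props.put(ProducerConfig.LINGER_MS_CONFIG, 20);

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                for (int i = 0; i < 100; i++) {
                    // "my-topic" is a placeholder topic name.
                    producer.send(new ProducerRecord<>("my-topic", "key-" + i, "value-" + i));
                }
            }
        }
    }

Since the topic-level compression.type defaults to "producer", the broker keeps each batch in whatever codec the producer chose; configuring a specific codec on the topic instead would cause the broker to recompress batches on write.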

Kafka supports GZIP, Snappy, LZ4 and ZStandard compression protocols. More details on compression can be found here. diff --git a/docs/ops.html b/docs/ops.html index 60fdab188180a..77c2b0967add3 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -2961,12 +2961,12 @@

max.warmup.replicas
  • num.standby.replicas
  • num.stream.threads
  • -
  • partition.grouper
  • probing.rebalance.interval.ms
  • processing.guarantee
  • rack.aware.assignment.non_overlap_cost
  • @@ -365,11 +364,6 @@

    num.standby.replicasThe number of threads to execute stream processing. 1 - partition.grouper - Low - Partition grouper class that implements the PartitionGrouper interface. - See Partition Grouper - probing.rebalance.interval.ms Low The maximum time in milliseconds to wait before triggering a rebalance to probe for warmup replicas that have sufficiently caught up. @@ -883,17 +877,6 @@
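A minimal sketch (not part of this patch) of how the Kafka Streams configurations listed in this table are typically set in application code, assuming placeholder application id, bootstrap server, and topic names; the removed partition.grouper setting is deliberately absent since it is no longer supported.

    import java.util.Properties;

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;

    public class StreamsConfigExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Application id and bootstrap server are placeholders for this sketch.
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-config-example");
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
            // Configs from the table above: standby replicas, processing threads, probing rebalances.
            props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
            props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 2);
            props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 10 * 60 * 1000L);

            // A trivial pass-through topology; "input-topic" and "output-topic" are placeholders.
            StreamsBuilder builder = new StreamsBuilder();
            builder.<String, String>stream("input-topic").to("output-topic");

            KafkaStreams streams = new KafkaStreams(builder.build(), props);
            streams.start();
            Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        }
    }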

    num.stream.threadsThreading Model. -
    -

    partition.grouper

    -
    -
    - [DEPRECATED] A partition grouper creates a list of stream tasks from the partitions of source topics, where each created task is assigned with a group of source topic partitions. - The default implementation provided by Kafka Streams is DefaultPartitionGrouper. - It assigns each task with one partition for each of the source topic partitions. The generated number of tasks equals the largest - number of partitions among the input topics. Usually an application does not need to customize the partition grouper. -
    -
    -

    probing.rebalance.interval.ms

    diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java index 7bd6f22f6ea1c..2d1847839a337 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterControlManager.java @@ -25,11 +25,15 @@ import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.BrokerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.FenceBrokerRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint; import org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerFeature; +import org.apache.kafka.common.metadata.RegisterControllerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerEndpointCollection; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerFeatureCollection; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; import org.apache.kafka.common.protocol.ApiMessage; @@ -40,6 +44,7 @@ import org.apache.kafka.metadata.BrokerRegistrationFencingChange; import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange; import org.apache.kafka.metadata.BrokerRegistrationReply; +import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.FinalizedControllerFeatures; import org.apache.kafka.metadata.VersionRange; import org.apache.kafka.metadata.placement.ReplicaPlacer; @@ -51,6 +56,7 @@ import org.apache.kafka.timeline.TimelineHashMap; import org.slf4j.Logger; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -241,6 +247,11 @@ boolean check() { */ private final boolean zkMigrationEnabled; + /** + * Maps controller IDs to controller registrations. 
+ */ + private final TimelineHashMap controllerRegistrations; + private ClusterControlManager( LogContext logContext, String clusterId, @@ -263,6 +274,7 @@ private ClusterControlManager( this.readyBrokersFuture = Optional.empty(); this.featureControl = featureControl; this.zkMigrationEnabled = zkMigrationEnabled; + this.controllerRegistrations = new TimelineHashMap<>(snapshotRegistry, 0); } ReplicaPlacer replicaPlacer() { @@ -290,13 +302,6 @@ Map brokerRegistrations() { return brokerRegistrations; } - Map> brokerSupportedVersions() { - return brokerRegistrations() - .entrySet() - .stream() - .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().supportedFeatures())); - } - Set fencedBrokerIds() { return brokerRegistrations.values() .stream() @@ -382,6 +387,37 @@ public ControllerResult registerBroker( return ControllerResult.atomicOf(records, new BrokerRegistrationReply(brokerEpoch)); } + ControllerResult registerController(ControllerRegistrationRequestData request) { + if (!featureControl.metadataVersion().isControllerRegistrationSupported()) { + throw new UnsupportedVersionException("The current MetadataVersion is too old to " + + "support controller registrations."); + } + ControllerEndpointCollection endpoints = new ControllerEndpointCollection(); + request.listeners().forEach(listener -> { + endpoints.add(new RegisterControllerRecord.ControllerEndpoint(). + setHost(listener.host()). + setName(listener.name()). + setPort(listener.port()). + setSecurityProtocol(listener.securityProtocol())); + }); + ControllerFeatureCollection features = new ControllerFeatureCollection(); + request.features().forEach(feature -> { + features.add(new RegisterControllerRecord.ControllerFeature(). + setName(feature.name()). + setMaxSupportedVersion(feature.maxSupportedVersion()). + setMinSupportedVersion(feature.minSupportedVersion())); + }); + List records = new ArrayList<>(); + records.add(new ApiMessageAndVersion(new RegisterControllerRecord(). + setControllerId(request.controllerId()). + setIncarnationId(request.incarnationId()). + setZkMigrationReady(request.zkMigrationReady()). + setEndPoints(endpoints). + setFeatures(features), + (short) 0)); + return ControllerResult.atomicOf(records, null); + } + BrokerFeature processRegistrationFeature( int brokerId, FinalizedControllerFeatures finalizedFeatures, @@ -541,6 +577,15 @@ private void replayRegistrationChange( } } + public void replay(RegisterControllerRecord record) { + ControllerRegistration newRegistration = new ControllerRegistration.Builder(record).build(); + ControllerRegistration prevRegistration = + controllerRegistrations.put(record.controllerId(), newRegistration); + log.info("Replayed RegisterControllerRecord contaning {}.{}", newRegistration, + prevRegistration == null ? 
"" : + " Previous incarnation was " + prevRegistration.incarnationId()); + } + Iterator usableBrokers() { if (heartbeatManager == null) { throw new RuntimeException("ClusterControlManager is not active."); @@ -615,4 +660,44 @@ public void addReadyBrokersFuture(CompletableFuture future, int minBrokers readyBrokersFuture = Optional.empty(); } } + + Iterator>> brokerSupportedFeatures() { + return new Iterator>>() { + private final Iterator iter = brokerRegistrations.values().iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public Entry> next() { + BrokerRegistration registration = iter.next(); + return new AbstractMap.SimpleImmutableEntry<>(registration.id(), + registration.supportedFeatures()); + } + }; + } + + Iterator>> controllerSupportedFeatures() { + if (!featureControl.metadataVersion().isControllerRegistrationSupported()) { + throw new UnsupportedVersionException("The current MetadataVersion is too old to " + + "support controller registrations."); + } + return new Iterator>>() { + private final Iterator iter = controllerRegistrations.values().iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public Entry> next() { + ControllerRegistration registration = iter.next(); + return new AbstractMap.SimpleImmutableEntry<>(registration.id(), + registration.supportedFeatures()); + } + }; + } } diff --git a/metadata/src/main/java/org/apache/kafka/controller/ClusterFeatureSupportDescriber.java b/metadata/src/main/java/org/apache/kafka/controller/ClusterFeatureSupportDescriber.java new file mode 100644 index 0000000000000..7091079a6b9c7 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/controller/ClusterFeatureSupportDescriber.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.controller; + +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.Map; + +import org.apache.kafka.metadata.VersionRange; + + +public interface ClusterFeatureSupportDescriber { + Iterator>> brokerSupported(); + Iterator>> controllerSupported(); +} + diff --git a/metadata/src/main/java/org/apache/kafka/controller/Controller.java b/metadata/src/main/java/org/apache/kafka/controller/Controller.java index 9209dfb757117..b963ac75cf4ac 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/Controller.java +++ b/metadata/src/main/java/org/apache/kafka/controller/Controller.java @@ -30,6 +30,7 @@ import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.message.BrokerHeartbeatRequestData; import org.apache.kafka.common.message.BrokerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.message.CreateDelegationTokenRequestData; import org.apache.kafka.common.message.CreateDelegationTokenResponseData; import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic; @@ -390,6 +391,19 @@ CompletableFuture> createPartitions( boolean validateOnly ); + /** + * Attempt to register the given controller. + * + * @param context The controller request context. + * @param request The registration request. + * + * @return A future yielding the broker registration reply. + */ + CompletableFuture registerController( + ControllerRequestContext context, + ControllerRegistrationRequestData request + ); + /** * Begin shutting down, but don't block. You must still call close to clean up all * resources. diff --git a/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java index be0eb4fce22f5..4aacee9641e2c 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java +++ b/metadata/src/main/java/org/apache/kafka/controller/FeatureControlManager.java @@ -19,6 +19,8 @@ import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.Map; @@ -26,7 +28,6 @@ import java.util.TreeMap; import java.util.function.Consumer; -import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.metadata.FeatureLevelRecord; import org.apache.kafka.common.metadata.ZkMigrationStateRecord; @@ -49,13 +50,23 @@ public class FeatureControlManager { - public static class Builder { private LogContext logContext = null; private SnapshotRegistry snapshotRegistry = null; private QuorumFeatures quorumFeatures = null; private MetadataVersion metadataVersion = MetadataVersion.latest(); private MetadataVersion minimumBootstrapVersion = MetadataVersion.MINIMUM_BOOTSTRAP_VERSION; + private ClusterFeatureSupportDescriber clusterSupportDescriber = new ClusterFeatureSupportDescriber() { + @Override + public Iterator>> brokerSupported() { + return Collections.>emptyMap().entrySet().iterator(); + } + + @Override + public Iterator>> controllerSupported() { + return Collections.>emptyMap().entrySet().iterator(); + } + }; Builder setLogContext(LogContext logContext) { this.logContext = logContext; @@ -82,19 +93,28 @@ Builder setMinimumBootstrapVersion(MetadataVersion minimumBootstrapVersion) { return this; } + Builder 
setClusterFeatureSupportDescriber(ClusterFeatureSupportDescriber clusterSupportDescriber) { + this.clusterSupportDescriber = clusterSupportDescriber; + return this; + } + public FeatureControlManager build() { if (logContext == null) logContext = new LogContext(); if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext); if (quorumFeatures == null) { - quorumFeatures = new QuorumFeatures(0, new ApiVersions(), QuorumFeatures.defaultFeatureMap(), - Collections.emptyList()); + Map localSupportedFeatures = new HashMap<>(); + localSupportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( + MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), + MetadataVersion.latest().featureLevel())); + quorumFeatures = new QuorumFeatures(0, localSupportedFeatures, Collections.singletonList(0)); } return new FeatureControlManager( logContext, quorumFeatures, snapshotRegistry, metadataVersion, - minimumBootstrapVersion + minimumBootstrapVersion, + clusterSupportDescriber ); } } @@ -126,12 +146,18 @@ public FeatureControlManager build() { */ private final MetadataVersion minimumBootstrapVersion; + /** + * Gives information about the supported versions in the cluster. + */ + private final ClusterFeatureSupportDescriber clusterSupportDescriber; + private FeatureControlManager( LogContext logContext, QuorumFeatures quorumFeatures, SnapshotRegistry snapshotRegistry, MetadataVersion metadataVersion, - MetadataVersion minimumBootstrapVersion + MetadataVersion minimumBootstrapVersion, + ClusterFeatureSupportDescriber clusterSupportDescriber ) { this.log = logContext.logger(FeatureControlManager.class); this.quorumFeatures = quorumFeatures; @@ -139,12 +165,12 @@ private FeatureControlManager( this.metadataVersion = new TimelineObject<>(snapshotRegistry, metadataVersion); this.minimumBootstrapVersion = minimumBootstrapVersion; this.migrationControlState = new TimelineObject<>(snapshotRegistry, ZkMigrationState.NONE); + this.clusterSupportDescriber = clusterSupportDescriber; } ControllerResult> updateFeatures( Map updates, Map upgradeTypes, - Map> brokerFeatures, boolean validateOnly ) { TreeMap results = new TreeMap<>(); @@ -152,7 +178,7 @@ ControllerResult> updateFeatures( BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); for (Entry entry : updates.entrySet()) { results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(), - upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), brokerFeatures, records)); + upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records)); } if (validateOnly) { @@ -174,7 +200,6 @@ private ApiError updateFeature( String featureName, short newVersion, FeatureUpdate.UpgradeType upgradeType, - Map> brokersAndFeatures, List records ) { if (upgradeType.equals(FeatureUpdate.UpgradeType.UNKNOWN)) { @@ -194,23 +219,11 @@ private ApiError updateFeature( "A feature version cannot be less than 0."); } - Optional reasonNotSupported = quorumFeatures.reasonNotSupported(featureName, newVersion); + Optional reasonNotSupported = reasonNotSupported(featureName, newVersion); if (reasonNotSupported.isPresent()) { return invalidUpdateVersion(featureName, newVersion, reasonNotSupported.get()); } - for (Entry> brokerEntry : brokersAndFeatures.entrySet()) { - VersionRange brokerRange = brokerEntry.getValue().get(featureName); - if (brokerRange == null) { - return invalidUpdateVersion(featureName, newVersion, - "Broker " + brokerEntry.getKey() + " does not support this feature."); - } else if 
(!brokerRange.contains(newVersion)) { - return invalidUpdateVersion(featureName, newVersion, - "Broker " + brokerEntry.getKey() + " does not support the given " + - "version. It supports " + brokerRange.min() + " to " + brokerRange.max() + "."); - } - } - if (newVersion < currentVersion) { if (upgradeType.equals(FeatureUpdate.UpgradeType.UPGRADE)) { return invalidUpdateVersion(featureName, newVersion, @@ -234,9 +247,64 @@ private ApiError updateFeature( } } + private Optional reasonNotSupported( + String featureName, + short newVersion + ) { + int numBrokersChecked = 0; + int numControllersChecked = 0; + Optional reason = quorumFeatures.reasonNotLocallySupported(featureName, newVersion); + if (reason.isPresent()) return reason; + numControllersChecked++; + for (Iterator>> iter = + clusterSupportDescriber.brokerSupported(); + iter.hasNext(); ) { + Entry> entry = iter.next(); + reason = QuorumFeatures.reasonNotSupported(newVersion, + "Broker " + entry.getKey(), + entry.getValue().getOrDefault(featureName, QuorumFeatures.DISABLED)); + if (reason.isPresent()) return reason; + numBrokersChecked++; + } + String registrationSuffix = ""; + HashSet foundControllers = new HashSet<>(); + foundControllers.add(quorumFeatures.nodeId()); + if (metadataVersion.get().isControllerRegistrationSupported()) { + for (Iterator>> iter = + clusterSupportDescriber.controllerSupported(); + iter.hasNext(); ) { + Entry> entry = iter.next(); + if (entry.getKey() == quorumFeatures.nodeId()) { + // No need to re-check the features supported by this controller, since we + // already checked that above. + continue; + } + reason = QuorumFeatures.reasonNotSupported(newVersion, + "Controller " + entry.getKey(), + entry.getValue().getOrDefault(featureName, QuorumFeatures.DISABLED)); + if (reason.isPresent()) return reason; + foundControllers.add(entry.getKey()); + numControllersChecked++; + } + for (int id : quorumFeatures.quorumNodeIds()) { + if (!foundControllers.contains(id)) { + return Optional.of("controller " + id + " has not registered, and may not " + + "support this feature"); + } + } + } else { + registrationSuffix = " Note: unable to verify controller support in the current " + + "MetadataVersion."; + } + log.info("Verified that {} broker(s) and {} controller(s) supported changing {} to " + + "feature level {}.{}", numBrokersChecked, numControllersChecked, featureName, + newVersion, registrationSuffix); + return Optional.empty(); + } + private ApiError invalidUpdateVersion(String feature, short version, String message) { String errorMessage = String.format("Invalid update version %d for feature %s. 
%s", version, feature, message); - log.debug(errorMessage); + log.warn(errorMessage); return new ApiError(Errors.INVALID_UPDATE_VERSION, errorMessage); } @@ -273,7 +341,7 @@ private ApiError updateMetadataVersion( // This is a downgrade boolean metadataChanged = MetadataVersion.checkIfMetadataChanged(currentVersion, newVersion); if (!metadataChanged) { - log.info("Downgrading metadata.version from {} to {}.", currentVersion, newVersion); + log.warn("Downgrading metadata.version from {} to {}.", currentVersion, newVersion); } else if (allowUnsafeDowngrade) { return invalidMetadataVersion(newVersionLevel, "Unsafe metadata downgrade is not supported " + "in this version."); @@ -283,7 +351,7 @@ private ApiError updateMetadataVersion( "UNSAFE_DOWNGRADE if you want to force the downgrade to proceed."); } } else { - log.info("Upgrading metadata.version from {} to {}.", currentVersion, newVersion); + log.warn("Upgrading metadata.version from {} to {}.", currentVersion, newVersion); } recordConsumer.accept(new ApiMessageAndVersion( @@ -296,7 +364,7 @@ private ApiError updateMetadataVersion( private ApiError invalidMetadataVersion(short version, String message) { String errorMessage = String.format("Invalid metadata.version %d. %s", version, message); - log.error(errorMessage); + log.warn(errorMessage); return new ApiError(Errors.INVALID_UPDATE_VERSION, errorMessage); } diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java index 44784cfaa8c62..5b437cec754ac 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumController.java @@ -39,6 +39,7 @@ import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.message.BrokerHeartbeatRequestData; import org.apache.kafka.common.message.BrokerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.message.CreateDelegationTokenRequestData; import org.apache.kafka.common.message.CreateDelegationTokenResponseData; import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic; @@ -71,6 +72,7 @@ import org.apache.kafka.common.metadata.PartitionRecord; import org.apache.kafka.common.metadata.ProducerIdsRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord; import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord; import org.apache.kafka.common.metadata.RemoveDelegationTokenRecord; import org.apache.kafka.common.metadata.RemoveTopicRecord; @@ -95,6 +97,7 @@ import org.apache.kafka.metadata.BrokerRegistrationReply; import org.apache.kafka.metadata.FinalizedControllerFeatures; import org.apache.kafka.metadata.KafkaConfigSchema; +import org.apache.kafka.metadata.VersionRange; import org.apache.kafka.metadata.bootstrap.BootstrapMetadata; import org.apache.kafka.metadata.migration.ZkMigrationState; import org.apache.kafka.metadata.migration.ZkRecordConsumer; @@ -130,6 +133,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.Map; @@ -147,7 +151,6 @@ import static java.util.concurrent.TimeUnit.MICROSECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; import static 
org.apache.kafka.controller.QuorumController.ControllerOperationFlag.DOES_NOT_UPDATE_QUEUE_TIME; -import static org.apache.kafka.controller.QuorumController.ControllerOperationFlag.COMPLETES_IN_TRANSACTION; import static org.apache.kafka.controller.QuorumController.ControllerOperationFlag.RUNS_IN_PREMIGRATION; @@ -469,6 +472,18 @@ public void accept(ConfigResource configResource) { } } + class QuorumClusterFeatureSupportDescriber implements ClusterFeatureSupportDescriber { + @Override + public Iterator>> brokerSupported() { + return clusterControl.brokerSupportedFeatures(); + } + + @Override + public Iterator>> controllerSupported() { + return clusterControl.controllerSupportedFeatures(); + } + } + public static final String CONTROLLER_THREAD_SUFFIX = "QuorumControllerEventHandler"; private OptionalInt latestController() { @@ -668,21 +683,16 @@ enum ControllerOperationFlag { * Operations without this flag will always return NOT_CONTROLLER when invoked in premigration * mode. *

    - * In pre-migration mode, we are still waiting to load the metadata from Apache - * ZooKeeper into the metadata log. Therefore, the metadata log is mostly empty, - * even though the cluster really does have metadata. Very few operations should - * use this flag. - */ - RUNS_IN_PREMIGRATION, - - /** - * This flag signifies that an event will be completed even if it is part of an unfinished transaction. - * This is needed for metadata transactions so that external callers can add records to a transaction - * and still use the returned future. One example usage of this flag is the batches of migrations records. - * The migration driver needs to wait on each submitted batch to avoid overwhelming the controller queue - * with events, so it needs events to be completed based on the committed (i.e., not stable) offset. + * In pre-migration mode, we are still waiting to load the metadata from Apache ZooKeeper into + * the metadata log. Therefore, the metadata log is mostly empty, even though the cluster really + * does have metadata + *

    + * Events using this flag will be completed even if a transaction is ongoing. Pre-migration + * events will be completed using the unstable (committed) offset rather than the stable offset. + *

    + * In practice, very few operations should use this flag. */ - COMPLETES_IN_TRANSACTION + RUNS_IN_PREMIGRATION } interface ControllerWriteOperation { @@ -763,7 +773,14 @@ public void run() throws Exception { // If the operation did not return any records, then it was actually just // a read after all, and not a read + write. However, this read was done // from the latest in-memory state, which might contain uncommitted data. - OptionalLong maybeOffset = deferredEventQueue.highestPendingOffset(); + // If the operation can complete within a transaction, let it use the + // unstable purgatory so that it can complete sooner. + OptionalLong maybeOffset; + if (featureControl.inPreMigrationMode() && flags.contains(RUNS_IN_PREMIGRATION)) { + maybeOffset = deferredUnstableEventQueue.highestPendingOffset(); + } else { + maybeOffset = deferredEventQueue.highestPendingOffset(); + } if (!maybeOffset.isPresent()) { // If the purgatory is empty, there are no pending operations and no // uncommitted state. We can complete immediately. @@ -825,7 +842,7 @@ public Long apply(List records) { // Remember the latest offset and future if it is not already completed if (!future.isDone()) { - if (flags.contains(COMPLETES_IN_TRANSACTION)) { + if (featureControl.inPreMigrationMode() && flags.contains(RUNS_IN_PREMIGRATION)) { deferredUnstableEventQueue.add(resultAndOffset.offset(), this); } else { deferredEventQueue.add(resultAndOffset.offset(), this); @@ -945,9 +962,7 @@ CompletableFuture appendWriteEvent( } class MigrationRecordConsumer implements ZkRecordConsumer { - private final EnumSet eventFlags = EnumSet.of( - RUNS_IN_PREMIGRATION, COMPLETES_IN_TRANSACTION - ); + private final EnumSet eventFlags = EnumSet.of(RUNS_IN_PREMIGRATION); private volatile OffsetAndEpoch highestMigrationRecordOffset; @@ -1297,7 +1312,7 @@ private void rescheduleMaybeFenceStaleBrokers() { rescheduleMaybeFenceStaleBrokers(); return result; }, - EnumSet.of(DOES_NOT_UPDATE_QUEUE_TIME, RUNS_IN_PREMIGRATION)); + EnumSet.of(DOES_NOT_UPDATE_QUEUE_TIME)); } private void cancelMaybeFenceReplicas() { @@ -1404,7 +1419,7 @@ private void maybeScheduleNextExpiredDelegationTokenSweep() { delegationTokenControlManager.isEnabled()) { log.debug( - "Scheduling write event for {} because DelegationTokens are enabled.", + "Scheduling write event for {} because DelegationTokens are enabled.", SWEEP_EXPIRED_DELEGATION_TOKENS ); @@ -1419,7 +1434,7 @@ private void maybeScheduleNextExpiredDelegationTokenSweep() { EnumSet.of(DOES_NOT_UPDATE_QUEUE_TIME) ); - long delayNs = time.nanoseconds() + + long delayNs = time.nanoseconds() + NANOSECONDS.convert(delegationTokenExpiryCheckIntervalMs, TimeUnit.MILLISECONDS); queue.scheduleDeferred(SWEEP_EXPIRED_DELEGATION_TOKENS, new EarliestDeadlineFunction(delayNs), event); @@ -1532,6 +1547,9 @@ private void replay(ApiMessage message, Optional snapshotId, lon case ABORT_TRANSACTION_RECORD: offsetControl.replay((AbortTransactionRecord) message, offset); break; + case REGISTER_CONTROLLER_RECORD: + clusterControl.replay((RegisterControllerRecord) message); + break; default: throw new RuntimeException("Unhandled record type " + type); } @@ -1618,6 +1636,11 @@ private void replay(ApiMessage message, Optional snapshotId, lon */ private final ClientQuotaControlManager clientQuotaControlManager; + /** + * Describes the feature versions in the cluster. + */ + private final QuorumClusterFeatureSupportDescriber clusterSupportDescriber; + /** * An object which stores the controller's view of the cluster. 
* This must be accessed only by the event queue thread. @@ -1796,6 +1819,7 @@ private QuorumController( setLogContext(logContext). setSnapshotRegistry(snapshotRegistry). build(); + this.clusterSupportDescriber = new QuorumClusterFeatureSupportDescriber(); this.featureControl = new FeatureControlManager.Builder(). setLogContext(logContext). setQuorumFeatures(quorumFeatures). @@ -1806,6 +1830,7 @@ private QuorumController( // are all treated as 3.0IV1. In newer versions the metadata.version will be specified // by the log. setMetadataVersion(MetadataVersion.MINIMUM_KRAFT_VERSION). + setClusterFeatureSupportDescriber(clusterSupportDescriber). build(); this.clusterControl = new ClusterControlManager.Builder(). setLogContext(logContext). @@ -2184,8 +2209,7 @@ public CompletableFuture updateFeatures( upgradeTypes.put(featureName, FeatureUpdate.UpgradeType.fromCode(featureUpdate.upgradeType())); updates.put(featureName, featureUpdate.maxVersionLevel()); }); - return featureControl.updateFeatures(updates, upgradeTypes, clusterControl.brokerSupportedVersions(), - request.validateOnly()); + return featureControl.updateFeatures(updates, upgradeTypes, request.validateOnly()); }).thenApply(result -> { UpdateFeaturesResponseData responseData = new UpdateFeaturesResponseData(); responseData.setResults(new UpdateFeaturesResponseData.UpdatableFeatureResultCollection(result.size())); @@ -2221,6 +2245,16 @@ public CompletableFuture> createPartitions( }); } + @Override + public CompletableFuture registerController( + ControllerRequestContext context, + ControllerRegistrationRequestData request + ) { + return appendWriteEvent("registerController", context.deadlineNs(), + () -> clusterControl.registerController(request), + EnumSet.of(RUNS_IN_PREMIGRATION)); + } + @Override public CompletableFuture> createAcls( ControllerRequestContext context, diff --git a/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java b/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java index 19431d2a06d75..b6766ac7e6f4c 100644 --- a/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java +++ b/metadata/src/main/java/org/apache/kafka/controller/QuorumFeatures.java @@ -17,141 +17,137 @@ package org.apache.kafka.controller; -import org.apache.kafka.clients.ApiVersions; -import org.apache.kafka.clients.NodeApiVersions; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.feature.SupportedVersionRange; +import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.VersionRange; import org.apache.kafka.server.common.MetadataVersion; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; -import java.util.stream.Collectors; /** - * A holder class of the local node's supported feature flags as well as the ApiVersions of other nodes. + * A holder class of the local node's supported feature flags as well as the quorum node IDs. 
*/ -public class QuorumFeatures { - private static final VersionRange DISABLED = VersionRange.of(0, 0); - - private static final Logger log = LoggerFactory.getLogger(QuorumFeatures.class); +public final class QuorumFeatures { + public static final VersionRange DISABLED = VersionRange.of(0, 0); private final int nodeId; - private final ApiVersions apiVersions; private final Map localSupportedFeatures; private final List quorumNodeIds; - QuorumFeatures( + static public Optional reasonNotSupported( + short newVersion, + String what, + VersionRange range + ) { + if (!range.contains(newVersion)) { + if (range.max() == (short) 0) { + return Optional.of(what + " does not support this feature."); + } else { + return Optional.of(what + " only supports versions " + range); + } + } + return Optional.empty(); + } + + public static Map defaultFeatureMap() { + Map features = new HashMap<>(1); + features.put(MetadataVersion.FEATURE_NAME, VersionRange.of( + MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), + MetadataVersion.latest().featureLevel())); + return features; + } + + public QuorumFeatures( int nodeId, - ApiVersions apiVersions, Map localSupportedFeatures, List quorumNodeIds ) { this.nodeId = nodeId; - this.apiVersions = apiVersions; this.localSupportedFeatures = Collections.unmodifiableMap(localSupportedFeatures); this.quorumNodeIds = Collections.unmodifiableList(quorumNodeIds); } - public static QuorumFeatures create( - int nodeId, - ApiVersions apiVersions, - Map localSupportedFeatures, - Collection quorumNodes - ) { - List nodeIds = quorumNodes.stream().map(Node::id).collect(Collectors.toList()); - return new QuorumFeatures(nodeId, apiVersions, localSupportedFeatures, nodeIds); + public int nodeId() { + return nodeId; } - public static Map defaultFeatureMap() { - Map features = new HashMap<>(1); - features.put(MetadataVersion.FEATURE_NAME, VersionRange.of( - MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), - MetadataVersion.latest().featureLevel())); - return features; + public Map localSupportedFeatures() { + return localSupportedFeatures; } - /** - * Return the reason a specific feature level is not supported, or Optional.empty if it is supported. - * - * @param featureName The feature name. - * @param level The feature level. - * @return The reason why the feature level is not supported, or Optional.empty if it is supported. - */ - public Optional reasonNotSupported(String featureName, short level) { - VersionRange localRange = localSupportedFeatures.getOrDefault(featureName, DISABLED); - if (!localRange.contains(level)) { - if (localRange.equals(DISABLED)) { - return Optional.of("Local controller " + nodeId + " does not support this feature."); - } else { - return Optional.of("Local controller " + nodeId + " only supports versions " + localRange); - } - } - List missing = new ArrayList<>(); - for (int id : quorumNodeIds) { - if (nodeId == id) { - continue; // We get the local node's features from localSupportedFeatures. - } - NodeApiVersions nodeVersions = apiVersions.get(Integer.toString(id)); - if (nodeVersions == null) { - missing.add(Integer.toString(id)); - continue; - } - SupportedVersionRange supportedRange = nodeVersions.supportedFeatures().get(featureName); - VersionRange range = supportedRange == null ? 
DISABLED : - VersionRange.of(supportedRange.min(), supportedRange.max()); - if (!range.contains(level)) { - if (range.equals(DISABLED)) { - return Optional.of("Controller " + id + " does not support this feature."); - } else { - return Optional.of("Controller " + id + " only supports versions " + range); - } - } - } - if (!missing.isEmpty()) { - log.info("Unable to get feature level information for controller(s): " + String.join(", ", missing)); - } - return Optional.empty(); + public List quorumNodeIds() { + return quorumNodeIds; } - VersionRange localSupportedFeature(String featureName) { - return localSupportedFeatures.getOrDefault(featureName, DISABLED); + public VersionRange localSupportedFeature(String name) { + return localSupportedFeatures.getOrDefault(name, DISABLED); } - boolean isControllerId(int nodeId) { + public boolean isControllerId(int nodeId) { return quorumNodeIds.contains(nodeId); } - // check if all controller nodes are ZK Migration ready - public Optional reasonAllControllersZkMigrationNotReady() { - List missingApiVers = new ArrayList<>(); - List zkMigrationNotReady = new ArrayList<>(); - for (int id : quorumNodeIds) { - if (nodeId == id) { - continue; // No need to check local node because the KraftMigrationDriver will be created only when migration config set - } - NodeApiVersions nodeVersions = apiVersions.get(Integer.toString(id)); - if (nodeVersions == null) { - missingApiVers.add(String.valueOf(id)); - } else if (!nodeVersions.zkMigrationEnabled()) { - zkMigrationNotReady.add(String.valueOf(id)); + public Optional reasonNotLocallySupported( + String featureName, + short newVersion + ) { + return reasonNotSupported(newVersion, + "Local controller " + nodeId, + localSupportedFeature(featureName)); + } + + public Optional reasonAllControllersZkMigrationNotReady( + MetadataVersion metadataVersion, + Map controllers + ) { + if (!metadataVersion.isMigrationSupported()) { + return Optional.of("Metadata version too low at " + metadataVersion); + } else if (!metadataVersion.isControllerRegistrationSupported()) { + return Optional.empty(); + } + for (int quorumNodeId : quorumNodeIds) { + ControllerRegistration registration = controllers.get(quorumNodeId); + if (registration == null) { + return Optional.of("No registration found for controller " + quorumNodeId); + } else if (!registration.zkMigrationReady()) { + return Optional.of("Controller " + quorumNodeId + " has not enabled " + + "zookeeper.metadata.migration.enable"); } } + return Optional.empty(); + } - boolean isReady = missingApiVers.isEmpty() && zkMigrationNotReady.isEmpty(); - if (!isReady) { - String zkMigrationNotReadyMsg = zkMigrationNotReady.isEmpty() ? "" : "Nodes don't enable `zookeeper.metadata.migration.enable`: " + zkMigrationNotReady + "."; - String missingApiVersionMsg = missingApiVers.isEmpty() ? 
"" : " Missing apiVersion from nodes: " + missingApiVers; - return Optional.of(zkMigrationNotReadyMsg + missingApiVersionMsg); - } + @Override + public int hashCode() { + return Objects.hash(nodeId, localSupportedFeatures, quorumNodeIds); + } - return Optional.empty(); + @Override + public boolean equals(Object o) { + if (o == null || !(o.getClass().equals(QuorumFeatures.class))) return false; + QuorumFeatures other = (QuorumFeatures) o; + return nodeId == other.nodeId && + localSupportedFeatures.equals(other.localSupportedFeatures) && + quorumNodeIds.equals(other.quorumNodeIds); + } + + @Override + public String toString() { + List features = new ArrayList<>(); + localSupportedFeatures.entrySet().forEach(f -> features.add(f.getKey() + ": " + f.getValue())); + features.sort(String::compareTo); + List nodeIds = new ArrayList<>(); + quorumNodeIds.forEach(id -> nodeIds.add("" + id)); + nodeIds.sort(String::compareTo); + return "QuorumFeatures" + + "(nodeId=" + nodeId + + ", localSupportedFeatures={" + features + "}" + + ", quorumNodeIds=[" + nodeIds + "]" + + ")"; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/ClusterDelta.java b/metadata/src/main/java/org/apache/kafka/image/ClusterDelta.java index 39d6fdb3d744c..d87f8d898a797 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ClusterDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/ClusterDelta.java @@ -20,11 +20,13 @@ import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.FenceBrokerRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; import org.apache.kafka.metadata.BrokerRegistration; import org.apache.kafka.metadata.BrokerRegistrationFencingChange; import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange; +import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.server.common.MetadataVersion; import java.util.HashMap; @@ -39,6 +41,7 @@ public final class ClusterDelta { private final ClusterImage image; private final HashMap> changedBrokers = new HashMap<>(); + private final HashMap> changedControllers = new HashMap<>(); public ClusterDelta(ClusterImage image) { this.image = image; @@ -48,6 +51,10 @@ public HashMap> changedBrokers() { return changedBrokers; } + public HashMap> changedControllers() { + return changedControllers; + } + public BrokerRegistration broker(int nodeId) { Optional result = changedBrokers.get(nodeId); if (result != null) { @@ -62,6 +69,11 @@ public void finishSnapshot() { changedBrokers.put(brokerId, Optional.empty()); } } + for (Integer controllerId : image.controllers().keySet()) { + if (!changedControllers.containsKey(controllerId)) { + changedControllers.put(controllerId, Optional.empty()); + } + } } public void handleMetadataVersionChange(MetadataVersion newVersion) { @@ -77,6 +89,11 @@ public void replay(UnregisterBrokerRecord record) { changedBrokers.put(record.brokerId(), Optional.empty()); } + public void replay(RegisterControllerRecord record) { + ControllerRegistration controller = new ControllerRegistration.Builder(record).build(); + changedControllers.put(controller.id(), Optional.of(controller)); + } + private BrokerRegistration getBrokerOrThrow(int brokerId, long epoch, String action) { BrokerRegistration broker = broker(brokerId); if (broker == null) { @@ 
-147,13 +164,33 @@ public ClusterImage apply() { } } } - return new ClusterImage(newBrokers); + Map newControllers = new HashMap<>(image.controllers().size()); + for (Entry entry : image.controllers().entrySet()) { + int nodeId = entry.getKey(); + Optional change = changedControllers.get(nodeId); + if (change == null) { + newControllers.put(nodeId, entry.getValue()); + } else if (change.isPresent()) { + newControllers.put(nodeId, change.get()); + } + } + for (Entry> entry : changedControllers.entrySet()) { + int nodeId = entry.getKey(); + Optional controllerRegistration = entry.getValue(); + if (!newControllers.containsKey(nodeId)) { + if (controllerRegistration.isPresent()) { + newControllers.put(nodeId, controllerRegistration.get()); + } + } + } + return new ClusterImage(newBrokers, newControllers); } @Override public String toString() { return "ClusterDelta(" + "changedBrokers=" + changedBrokers + + ", changedControllers=" + changedControllers + ')'; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java index 253bd193ffe36..3ffe16778c34d 100644 --- a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java +++ b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java @@ -21,6 +21,7 @@ import org.apache.kafka.image.writer.ImageWriter; import org.apache.kafka.image.writer.ImageWriterOptions; import org.apache.kafka.metadata.BrokerRegistration; +import org.apache.kafka.metadata.ControllerRegistration; import java.util.Collections; import java.util.Map; @@ -32,12 +33,20 @@ * This class is thread-safe. */ public final class ClusterImage { - public static final ClusterImage EMPTY = new ClusterImage(Collections.emptyMap()); + public static final ClusterImage EMPTY = new ClusterImage( + Collections.emptyMap(), + Collections.emptyMap()); private final Map brokers; - public ClusterImage(Map brokers) { + private final Map controllers; + + public ClusterImage( + Map brokers, + Map controllers + ) { this.brokers = Collections.unmodifiableMap(brokers); + this.controllers = Collections.unmodifiableMap(controllers); } public boolean isEmpty() { @@ -52,6 +61,10 @@ public BrokerRegistration broker(int nodeId) { return brokers.get(nodeId); } + public Map controllers() { + return controllers; + } + public boolean containsBroker(int brokerId) { return brokers.containsKey(brokerId); } diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java index 6c3927bdc7d0c..b4120ad8595c9 100644 --- a/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java +++ b/metadata/src/main/java/org/apache/kafka/image/MetadataDelta.java @@ -29,6 +29,7 @@ import org.apache.kafka.common.metadata.PartitionRecord; import org.apache.kafka.common.metadata.ProducerIdsRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord; import org.apache.kafka.common.metadata.RemoveAccessControlEntryRecord; import org.apache.kafka.common.metadata.RemoveDelegationTokenRecord; import org.apache.kafka.common.metadata.RemoveTopicRecord; @@ -248,6 +249,9 @@ public void replay(ApiMessage record) { case ZK_MIGRATION_STATE_RECORD: replay((ZkMigrationStateRecord) record); break; + case REGISTER_CONTROLLER_RECORD: + replay((RegisterControllerRecord) record); + break; default: throw new RuntimeException("Unknown metadata record type " + type); } @@ -345,6 +349,10 @@ public void 
replay(ZkMigrationStateRecord record) { getOrCreateFeaturesDelta().replay(record); } + public void replay(RegisterControllerRecord record) { + getOrCreateClusterDelta().replay(record); + } + /** * Create removal deltas for anything which was in the base image, but which was not * referenced in the snapshot records we just applied. diff --git a/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageBrokersNode.java b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageBrokersNode.java new file mode 100644 index 0000000000000..d0b2d7872d7b6 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageBrokersNode.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image.node; + +import org.apache.kafka.image.ClusterImage; +import org.apache.kafka.metadata.BrokerRegistration; + +import java.util.ArrayList; +import java.util.Collection; + + +public class ClusterImageBrokersNode implements MetadataNode { + /** + * The name of this node. + */ + public final static String NAME = "brokers"; + + /** + * The cluster image. + */ + private final ClusterImage image; + + public ClusterImageBrokersNode(ClusterImage image) { + this.image = image; + } + + @Override + public Collection childNames() { + ArrayList childNames = new ArrayList<>(); + for (Integer brokerId : image.brokers().keySet()) { + childNames.add(brokerId.toString()); + } + return childNames; + } + + @Override + public MetadataNode child(String name) { + try { + Integer brokerId = Integer.valueOf(name); + BrokerRegistration registration = image.brokers().get(brokerId); + if (registration == null) return null; + return new MetadataLeafNode(registration.toString()); + } catch (NumberFormatException e) { + return null; + } + } +} diff --git a/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageControllersNode.java b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageControllersNode.java new file mode 100644 index 0000000000000..02bc281344fe2 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageControllersNode.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image.node; + +import org.apache.kafka.image.ClusterImage; +import org.apache.kafka.metadata.ControllerRegistration; + +import java.util.ArrayList; +import java.util.Collection; + + +public class ClusterImageControllersNode implements MetadataNode { + /** + * The name of this node. + */ + public final static String NAME = "controllers"; + + /** + * The cluster image. + */ + private final ClusterImage image; + + public ClusterImageControllersNode(ClusterImage image) { + this.image = image; + } + + @Override + public Collection childNames() { + ArrayList childNames = new ArrayList<>(); + for (Integer brokerId : image.controllers().keySet()) { + childNames.add(brokerId.toString()); + } + return childNames; + } + + @Override + public MetadataNode child(String name) { + try { + Integer brokerId = Integer.valueOf(name); + ControllerRegistration registration = image.controllers().get(brokerId); + if (registration == null) return null; + return new MetadataLeafNode(registration.toString()); + } catch (NumberFormatException e) { + return null; + } + } +} diff --git a/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageNode.java b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageNode.java index 5788160a0ce3a..778378477ad52 100644 --- a/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageNode.java +++ b/metadata/src/main/java/org/apache/kafka/image/node/ClusterImageNode.java @@ -18,9 +18,8 @@ package org.apache.kafka.image.node; import org.apache.kafka.image.ClusterImage; -import org.apache.kafka.metadata.BrokerRegistration; -import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; @@ -41,21 +40,16 @@ public ClusterImageNode(ClusterImage image) { @Override public Collection childNames() { - ArrayList childNames = new ArrayList<>(); - for (Integer brokerId : image.brokers().keySet()) { - childNames.add(brokerId.toString()); - } - return childNames; + return Arrays.asList(ClusterImageBrokersNode.NAME, ClusterImageControllersNode.NAME); } @Override public MetadataNode child(String name) { - try { - Integer brokerId = Integer.valueOf(name); - BrokerRegistration registration = image.brokers().get(brokerId); - if (registration == null) return null; - return new MetadataLeafNode(registration.toString()); - } catch (NumberFormatException e) { + if (name.equals(ClusterImageBrokersNode.NAME)) { + return new ClusterImageBrokersNode(image); + } else if (name.equals(ClusterImageControllersNode.NAME)) { + return new ClusterImageControllersNode(image); + } else { return null; } } diff --git a/metadata/src/main/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisher.java b/metadata/src/main/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisher.java new file mode 100644 index 0000000000000..d04d308d063c5 --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisher.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.image.publisher; + +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBroker; +import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.loader.LoaderManifest; +import org.apache.kafka.image.loader.LoaderManifestType; +import org.apache.kafka.metadata.ControllerRegistration; + +import java.util.Collections; +import java.util.Map; + + +/** + * A publisher to track controller registrations. + */ +public class ControllerRegistrationsPublisher implements MetadataPublisher { + private volatile Map controllers; + + public ControllerRegistrationsPublisher() { + this.controllers = Collections.emptyMap(); + } + + @Override + public String name() { + return "ControllerRegistrationsPublisher"; + } + + @Override + public void onMetadataUpdate( + MetadataDelta delta, + MetadataImage newImage, + LoaderManifest manifest + ) { + if (manifest.type() == LoaderManifestType.LOG_DELTA || delta.clusterDelta() != null) { + controllers = newImage.cluster().controllers(); + } + } + + public DescribeClusterBrokerCollection describeClusterControllers( + String endpointName + ) { + DescribeClusterBrokerCollection results = new DescribeClusterBrokerCollection(); + for (ControllerRegistration registration : controllers.values()) { + Endpoint endpoint = registration.listeners().get(endpointName); + if (endpoint != null) { + results.add(new DescribeClusterBroker(). + setBrokerId(registration.id()). + setHost(endpoint.host()). + setPort(endpoint.port()). + setRack(null)); + } + } + return results; + } + + public Map controllers() { + return controllers; + } + + @Override + public void close() { + } +} diff --git a/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java new file mode 100644 index 0000000000000..f79515b8a106f --- /dev/null +++ b/metadata/src/main/java/org/apache/kafka/metadata/ControllerRegistration.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.metadata; + +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.RegisterControllerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerEndpoint; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerFeature; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.image.writer.ImageWriterOptions; +import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.MetadataVersion; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * An immutable class which represents controller registrations. + */ +public class ControllerRegistration { + public static class Builder { + private int id = 0; + private Uuid incarnationId = null; + private boolean zkMigrationReady = false; + private Map listeners = null; + private Map supportedFeatures = null; + + public Builder() { + this.id = 0; + this.incarnationId = null; + this.zkMigrationReady = false; + this.listeners = null; + this.supportedFeatures = null; + } + + public Builder(RegisterControllerRecord record) { + this.id = record.controllerId(); + this.incarnationId = record.incarnationId(); + this.zkMigrationReady = record.zkMigrationReady(); + Map newListeners = new HashMap<>(); + record.endPoints().forEach(endPoint -> { + SecurityProtocol protocol = SecurityProtocol.forId(endPoint.securityProtocol()); + if (protocol == null) { + throw new RuntimeException("Unknown security protocol " + + (int) endPoint.securityProtocol()); + } + newListeners.put(endPoint.name(), new Endpoint(endPoint.name(), + protocol, + endPoint.host(), + endPoint.port())); + }); + this.listeners = Collections.unmodifiableMap(newListeners); + Map newSupportedFeatures = new HashMap<>(); + record.features().forEach(feature -> { + newSupportedFeatures.put(feature.name(), VersionRange.of( + feature.minSupportedVersion(), feature.maxSupportedVersion())); + }); + this.supportedFeatures = Collections.unmodifiableMap(newSupportedFeatures); + } + + public Builder setId(int id) { + this.id = id; + return this; + } + + public Builder setIncarnationId(Uuid incarnationId) { + this.incarnationId = incarnationId; + return this; + } + + public Builder setZkMigrationReady(boolean zkMigrationReady) { + this.zkMigrationReady = zkMigrationReady; + return this; + } + + public Builder setListeners(Map listeners) { + this.listeners = listeners; + return this; + } + + public Builder setSupportedFeatures(Map supportedFeatures) { + this.supportedFeatures = supportedFeatures; + return this; + } + + public ControllerRegistration build() { + if (incarnationId == null) throw new RuntimeException("You must set incarnationId."); + if (listeners == null) throw new RuntimeException("You must set listeners."); + if (supportedFeatures == null) { + supportedFeatures = new HashMap<>(); + 
supportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( + MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel(), + MetadataVersion.latest().featureLevel())); + } + return new ControllerRegistration(id, + incarnationId, + zkMigrationReady, + listeners, + supportedFeatures); + } + } + + private final int id; + private final Uuid incarnationId; + private final boolean zkMigrationReady; + private final Map listeners; + private final Map supportedFeatures; + + private ControllerRegistration(int id, + Uuid incarnationId, + boolean zkMigrationReady, + Map listeners, + Map supportedFeatures + ) { + this.id = id; + this.incarnationId = incarnationId; + this.zkMigrationReady = zkMigrationReady; + this.listeners = listeners; + this.supportedFeatures = supportedFeatures; + } + + public int id() { + return id; + } + + public Uuid incarnationId() { + return incarnationId; + } + + public boolean zkMigrationReady() { + return zkMigrationReady; + } + + public Map listeners() { + return listeners; + } + + public Optional node(String listenerName) { + Endpoint endpoint = listeners().get(listenerName); + if (endpoint == null) { + return Optional.empty(); + } + return Optional.of(new Node(id, endpoint.host(), endpoint.port(), null)); + } + + public Map supportedFeatures() { + return supportedFeatures; + } + + public ApiMessageAndVersion toRecord(ImageWriterOptions options) { + RegisterControllerRecord registrationRecord = new RegisterControllerRecord(). + setControllerId(id). + setIncarnationId(incarnationId). + setZkMigrationReady(zkMigrationReady); + for (Entry entry : listeners.entrySet()) { + Endpoint endpoint = entry.getValue(); + registrationRecord.endPoints().add(new ControllerEndpoint(). + setName(entry.getKey()). + setHost(endpoint.host()). + setPort(endpoint.port()). + setSecurityProtocol(endpoint.securityProtocol().id)); + } + for (Entry entry : supportedFeatures.entrySet()) { + registrationRecord.features().add(new ControllerFeature(). + setName(entry.getKey()). + setMinSupportedVersion(entry.getValue().min()). + setMaxSupportedVersion(entry.getValue().max())); + } + return new ApiMessageAndVersion(registrationRecord, + options.metadataVersion().registerBrokerRecordVersion()); + } + + @Override + public int hashCode() { + return Objects.hash(id, + incarnationId, + zkMigrationReady, + listeners, + supportedFeatures); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ControllerRegistration)) return false; + ControllerRegistration other = (ControllerRegistration) o; + return other.id == id && + other.incarnationId.equals(incarnationId) && + other.zkMigrationReady == zkMigrationReady && + other.listeners.equals(listeners) && + other.supportedFeatures.equals(supportedFeatures); + } + + @Override + public String toString() { + StringBuilder bld = new StringBuilder(); + bld.append("ControllerRegistration(id=").append(id); + bld.append(", incarnationId=").append(incarnationId); + bld.append(", zkMigrationReady=").append(zkMigrationReady); + bld.append(", listeners=[").append( + listeners.keySet().stream().sorted(). + map(n -> listeners.get(n).toString()). + collect(Collectors.joining(", "))); + bld.append("], supportedFeatures={").append( + supportedFeatures.keySet().stream().sorted(). + map(k -> k + ": " + supportedFeatures.get(k)). 
+ collect(Collectors.joining(", "))); + bld.append("}"); + bld.append(")"); + return bld.toString(); + } +} diff --git a/metadata/src/main/java/org/apache/kafka/metadata/migration/KRaftMigrationDriver.java b/metadata/src/main/java/org/apache/kafka/metadata/migration/KRaftMigrationDriver.java index a5aca03126b61..aa60390cc06d4 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/migration/KRaftMigrationDriver.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/migration/KRaftMigrationDriver.java @@ -174,7 +174,8 @@ private void recoverMigrationStateFromZK() { } private boolean isControllerQuorumReadyForMigration() { - Optional notReadyMsg = this.quorumFeatures.reasonAllControllersZkMigrationNotReady(); + Optional notReadyMsg = this.quorumFeatures.reasonAllControllersZkMigrationNotReady( + image.features().metadataVersion(), image.cluster().controllers()); if (notReadyMsg.isPresent()) { log.warn("Still waiting for all controller nodes ready to begin the migration. Not ready due to:" + notReadyMsg.get()); return false; @@ -687,7 +688,7 @@ public void run() throws Exception { transitionTo(MigrationDriverState.SYNC_KRAFT_TO_ZK); } catch (Throwable t) { MigrationManifest partialManifest = manifestBuilder.build(); - log.error("Aborting the metadata migration from ZooKeeper to KRaft. {}.", partialManifest); + log.error("Aborting the metadata migration from ZooKeeper to KRaft. {}.", partialManifest, t); zkRecordConsumer.abortMigration(); // This terminates the controller via fatal fault handler super.handleException(t); } diff --git a/metadata/src/main/java/org/apache/kafka/metadata/publisher/FeaturesPublisher.java b/metadata/src/main/java/org/apache/kafka/metadata/publisher/FeaturesPublisher.java index 8be90ec87f5e9..e48eeefbe1289 100644 --- a/metadata/src/main/java/org/apache/kafka/metadata/publisher/FeaturesPublisher.java +++ b/metadata/src/main/java/org/apache/kafka/metadata/publisher/FeaturesPublisher.java @@ -17,18 +17,27 @@ package org.apache.kafka.metadata.publisher; +import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.loader.LoaderManifest; import org.apache.kafka.image.publisher.MetadataPublisher; import org.apache.kafka.server.common.Features; +import org.slf4j.Logger; import static org.apache.kafka.server.common.MetadataVersion.MINIMUM_KRAFT_VERSION; public class FeaturesPublisher implements MetadataPublisher { + private final Logger log; private volatile Features features = Features.fromKRaftVersion(MINIMUM_KRAFT_VERSION); + public FeaturesPublisher( + LogContext logContext + ) { + log = logContext.logger(FeaturesPublisher.class); + } + public Features features() { return features; } @@ -45,10 +54,14 @@ public void onMetadataUpdate( LoaderManifest manifest ) { if (delta.featuresDelta() != null) { - features = new Features(newImage.features().metadataVersion(), + Features newFeatures = new Features(newImage.features().metadataVersion(), newImage.features().finalizedVersions(), newImage.provenance().lastContainedOffset(), true); + if (!newFeatures.equals(features)) { + log.info("Loaded new metadata {}.", newFeatures); + features = newFeatures; + } } } } diff --git a/metadata/src/main/resources/common/metadata/RegisterControllerRecord.json b/metadata/src/main/resources/common/metadata/RegisterControllerRecord.json new file mode 100644 index 0000000000000..c3bedfe796e59 --- /dev/null +++ 
b/metadata/src/main/resources/common/metadata/RegisterControllerRecord.json @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 27, + "type": "metadata", + "name": "RegisterControllerRecord", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ControllerId", "type": "int32", "versions": "0+", + "about": "The controller id." }, + { "name": "IncarnationId", "type": "uuid", "versions": "0+", + "about": "The incarnation ID of the controller process" }, + { "name": "ZkMigrationReady", "type": "bool", "versions": "0+", + "about": "Set if the required configurations for ZK migration are present." }, + { "name": "EndPoints", "type": "[]ControllerEndpoint", "versions": "0+", + "about": "The endpoints that can be used to communicate with this controller.", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, + "about": "The name of the endpoint." }, + { "name": "Host", "type": "string", "versions": "0+", + "about": "The hostname." }, + { "name": "Port", "type": "uint16", "versions": "0+", + "about": "The port." }, + { "name": "SecurityProtocol", "type": "int16", "versions": "0+", + "about": "The security protocol." } + ]}, + { "name": "Features", "type": "[]ControllerFeature", + "about": "The features on this controller", "versions": "0+", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, + "about": "The feature name." }, + { "name": "MinSupportedVersion", "type": "int16", "versions": "0+", + "about": "The minimum supported feature level." }, + { "name": "MaxSupportedVersion", "type": "int16", "versions": "0+", + "about": "The maximum supported feature level." 
} + ]} + ] +} diff --git a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java index c7dcbc5bfcd7c..1749ec3e87a33 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java @@ -17,13 +17,13 @@ package org.apache.kafka.controller; -import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.common.Endpoint; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InconsistentClusterIdException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.BrokerRegistrationRequestData; +import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.FenceBrokerRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; @@ -75,7 +75,7 @@ public void testReplay(MetadataVersion metadataVersion) { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -136,7 +136,7 @@ public void testReplayRegisterBrokerRecord() { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -189,7 +189,7 @@ public void testReplayBrokerRegistrationChangeRecord() { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -244,7 +244,7 @@ public void testRegistrationWithIncorrectClusterId() throws Exception { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -273,7 +273,7 @@ public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). 
setMetadataVersion(metadataVersion). @@ -329,7 +329,7 @@ public void testUnregister() throws Exception { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -363,7 +363,7 @@ public void testPlaceReplicas(int numUsableBrokers) throws Exception { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). @@ -416,7 +416,7 @@ public void testRegistrationsToRecords(MetadataVersion metadataVersion) throws E SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(metadataVersion). @@ -492,7 +492,7 @@ public void testRegistrationWithUnsupportedMetadataVersion() { SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext()); FeatureControlManager featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, Collections.singletonMap(MetadataVersion.FEATURE_NAME, VersionRange.of( MetadataVersion.IBP_3_1_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV0.featureLevel())), @@ -536,4 +536,19 @@ public void testRegistrationWithUnsupportedMetadataVersion() { 123L, featureControl.finalizedFeatures(Long.MAX_VALUE))).getMessage()); } + + @Test + public void testRegisterControlWithOlderMetadataVersion() { + FeatureControlManager featureControl = new FeatureControlManager.Builder(). + setMetadataVersion(MetadataVersion.IBP_3_3_IV0). + build(); + ClusterControlManager clusterControl = new ClusterControlManager.Builder(). + setClusterId("fPZv1VBsRFmnlRvmGcOW9w"). + setFeatureControlManager(featureControl). 
+ build(); + clusterControl.activate(); + assertEquals("The current MetadataVersion is too old to support controller registrations.", + assertThrows(UnsupportedVersionException.class, () -> clusterControl.registerController( + new ControllerRegistrationRequestData().setControllerId(1))).getMessage()); + } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java index bd4db8450412d..3781a136d1229 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java @@ -17,16 +17,17 @@ package org.apache.kafka.controller; +import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; -import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.metadata.FeatureLevelRecord; import org.apache.kafka.common.protocol.Errors; @@ -45,7 +46,6 @@ import org.junit.jupiter.api.Timeout; import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -78,7 +78,7 @@ private static Map versionMap(Object... args) { public static QuorumFeatures features(Object... args) { Map features = QuorumFeatures.defaultFeatureMap(); features.putAll(rangeMap(args)); - return new QuorumFeatures(0, new ApiVersions(), features, emptyList()); + return new QuorumFeatures(0, features, emptyList()); } private static Map updateMap(Object... args) { @@ -107,10 +107,10 @@ public void testUpdateFeatures() { "Invalid update version 3 for feature foo. Local controller 0 only supports versions 1-2"))), manager.updateFeatures(updateMap("foo", 3), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - Collections.emptyMap(), false)); + false)); ControllerResult> result = manager.updateFeatures( updateMap("foo", 2, "bar", 1), Collections.emptyMap(), - Collections.emptyMap(), false); + false); Map expectedMap = new HashMap<>(); expectedMap.put("foo", ApiError.NONE); expectedMap.put("bar", new ApiError(Errors.INVALID_UPDATE_VERSION, @@ -143,63 +143,70 @@ public void testReplay() { manager.finalizedFeatures(123)); } + static ClusterFeatureSupportDescriber createFakeClusterFeatureSupportDescriber( + List>> brokerRanges, + List>> controllerRanges + ) { + return new ClusterFeatureSupportDescriber() { + @Override + public Iterator>> brokerSupported() { + return brokerRanges.iterator(); + } + + @Override + public Iterator>> controllerSupported() { + return controllerRanges.iterator(); + } + }; + } + @Test public void testUpdateFeaturesErrorCases() { LogContext logContext = new LogContext(); SnapshotRegistry snapshotRegistry = new SnapshotRegistry(logContext); FeatureControlManager manager = new FeatureControlManager.Builder(). setLogContext(logContext). - setQuorumFeatures(features("foo", 1, 5, "bar", 1, 2)). + setQuorumFeatures(features("foo", 1, 5, "bar", 0, 3)). setSnapshotRegistry(snapshotRegistry). 
+ setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( + Arrays.asList(new SimpleImmutableEntry<>(5, Collections.singletonMap("bar", VersionRange.of(0, 3)))), + Arrays.asList())). build(); - assertEquals( - ControllerResult.atomicOf( - emptyList(), - Collections.singletonMap( - "foo", - new ApiError( - Errors.INVALID_UPDATE_VERSION, - "Invalid update version 3 for feature foo. Broker 5 does not support this feature." - ) - ) - ), - manager.updateFeatures( - updateMap("foo", 3), - Collections.singletonMap("foo", FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - Collections.singletonMap(5, rangeMap()), - false) - ); + assertEquals(ControllerResult.atomicOf(emptyList(), + Collections.singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION, + "Invalid update version 3 for feature foo. Broker 5 does not support this feature."))), + manager.updateFeatures(updateMap("foo", 3), + Collections.singletonMap("foo", FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), + false)); ControllerResult> result = manager.updateFeatures( - updateMap("foo", 3), Collections.emptyMap(), Collections.emptyMap(), false); - assertEquals(Collections.singletonMap("foo", ApiError.NONE), result.response()); + updateMap("bar", 3), Collections.emptyMap(), false); + assertEquals(Collections.singletonMap("bar", ApiError.NONE), result.response()); manager.replay((FeatureLevelRecord) result.records().get(0).message()); snapshotRegistry.getOrCreateSnapshot(3); assertEquals(ControllerResult.atomicOf(emptyList(), Collections. - singletonMap("foo", new ApiError(Errors.INVALID_UPDATE_VERSION, - "Invalid update version 2 for feature foo. Can't downgrade the version of this feature " + + singletonMap("bar", new ApiError(Errors.INVALID_UPDATE_VERSION, + "Invalid update version 2 for feature bar. Can't downgrade the version of this feature " + "without setting the upgrade type to either safe or unsafe downgrade."))), - manager.updateFeatures(updateMap("foo", 2), - Collections.emptyMap(), Collections.emptyMap(), false)); + manager.updateFeatures(updateMap("bar", 2), Collections.emptyMap(), false)); assertEquals( ControllerResult.atomicOf( Collections.singletonList( new ApiMessageAndVersion( new FeatureLevelRecord() - .setName("foo") + .setName("bar") .setFeatureLevel((short) 2), (short) 0 ) ), - Collections.singletonMap("foo", ApiError.NONE) + Collections.singletonMap("bar", ApiError.NONE) ), manager.updateFeatures( - updateMap("foo", 2), - Collections.singletonMap("foo", FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - Collections.emptyMap(), + updateMap("bar", 2), + Collections.singletonMap("bar", FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), false) ); } @@ -215,8 +222,7 @@ public void testReplayRecords() throws Exception { setMetadataVersion(MetadataVersion.IBP_3_3_IV0). build(); ControllerResult> result = manager. 
- updateFeatures(updateMap("foo", 5, "bar", 1), - Collections.emptyMap(), Collections.emptyMap(), false); + updateFeatures(updateMap("foo", 5, "bar", 1), Collections.emptyMap(), false); RecordTestUtils.replayAll(manager, result.records()); assertEquals(MetadataVersion.IBP_3_3_IV0, manager.metadataVersion()); assertEquals(Optional.of((short) 5), manager.finalizedFeatures(Long.MAX_VALUE).get("foo")); @@ -251,7 +257,6 @@ public void testCannotDowngradeToVersionBeforeMinimumSupportedKraftVersion() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_2_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - emptyMap(), true)); } @@ -265,7 +270,6 @@ public void testCannotDowngradeToHigherVersion() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV3.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - emptyMap(), true)); } @@ -279,7 +283,6 @@ public void testCannotUnsafeDowngradeToHigherVersion() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV3.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - emptyMap(), true)); } @@ -294,7 +297,6 @@ public void testCannotUpgradeToLowerVersion() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), - emptyMap(), true)); } @@ -306,7 +308,6 @@ public void testCanUpgradeToHigherVersion() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV3.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), - emptyMap(), true)); } @@ -321,7 +322,6 @@ public void testCannotUseSafeDowngradeIfMetadataChanged() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - emptyMap(), true)); } @@ -334,7 +334,6 @@ public void testUnsafeDowngradeIsTemporarilyDisabled() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - emptyMap(), true)); } @@ -347,7 +346,6 @@ public void testCanUseUnsafeDowngradeIfMetadataChanged() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - emptyMap(), true)); } @@ -364,7 +362,6 @@ public void testCanUseSafeDowngradeIfMetadataDidNotChange() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_0_IV1.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - emptyMap(), true)); } @@ -381,7 +378,6 @@ public void testCannotDowngradeBefore3_3_IV0() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_2_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - emptyMap(), true)); } @@ -389,15 +385,17 @@ public void testCannotDowngradeBefore3_3_IV0() { public void testCreateFeatureLevelRecords() { Map localSupportedFeatures = new HashMap<>(); 
localSupportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of( - MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.latest().featureLevel())); + MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.latest().featureLevel())); localSupportedFeatures.put("foo", VersionRange.of(0, 2)); FeatureControlManager manager = new FeatureControlManager.Builder(). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), localSupportedFeatures, emptyList())). + setQuorumFeatures(new QuorumFeatures(0, localSupportedFeatures, emptyList())). + setClusterFeatureSupportDescriber(createFakeClusterFeatureSupportDescriber( + Arrays.asList(new SimpleImmutableEntry<>(1, Collections.singletonMap("foo", VersionRange.of(0, 3)))), + Arrays.asList())). build(); ControllerResult> result = manager.updateFeatures( Collections.singletonMap("foo", (short) 1), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UPGRADE), - Collections.singletonMap(1, Collections.singletonMap("foo", VersionRange.of(0, 3))), false); assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion( new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 1), (short) 0)), @@ -408,7 +406,6 @@ public void testCreateFeatureLevelRecords() { ControllerResult> result2 = manager.updateFeatures( Collections.singletonMap("foo", (short) 0), Collections.singletonMap("foo", FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), - Collections.singletonMap(1, Collections.singletonMap("foo", VersionRange.of(0, 3))), false); assertEquals(ControllerResult.atomicOf(Arrays.asList(new ApiMessageAndVersion( new FeatureLevelRecord().setName("foo").setFeatureLevel((short) 0), (short) 0)), @@ -434,7 +431,6 @@ public void testNoMetadataVersionChangeDuringMigration() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_5_IV1.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), - emptyMap(), true)); assertEquals(ControllerResult.of(Collections.emptyList(), @@ -443,7 +439,6 @@ public void testNoMetadataVersionChangeDuringMigration() { manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE), - emptyMap(), true)); // Complete the migration @@ -451,7 +446,6 @@ public void testNoMetadataVersionChangeDuringMigration() { ControllerResult> result = manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_5_IV1.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UPGRADE), - emptyMap(), false); assertEquals(Errors.NONE, result.response().get(MetadataVersion.FEATURE_NAME).error()); RecordTestUtils.replayAll(manager, result.records()); diff --git a/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java index fb02e0ebb4070..900e8d2c7af84 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ProducerIdControlManagerTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.controller; import java.util.Collections; -import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.metadata.ProducerIdsRecord; @@ -49,7 
+48,7 @@ public void setUp() { snapshotRegistry = new SnapshotRegistry(new LogContext()); featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(MetadataVersion.latest()). diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java index 74add61021c1b..3a70312ca1c7a 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java @@ -117,6 +117,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -166,7 +167,7 @@ public void testConfigurationOperations() throws Throwable { ) { controlEnv.activeController().registerBroker(ANONYMOUS_CONTEXT, new BrokerRegistrationRequestData(). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_6_IV2)). + setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)). setBrokerId(0). setClusterId(logEnv.clusterId())).get(); testConfigurationOperations(controlEnv.activeController()); @@ -207,7 +208,7 @@ public void testDelayedConfigurationOperations() throws Throwable { ) { controlEnv.activeController().registerBroker(ANONYMOUS_CONTEXT, new BrokerRegistrationRequestData(). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_6_IV2)). + setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)). setBrokerId(0). setClusterId(logEnv.clusterId())).get(); testDelayedConfigurationOperations(logEnv, controlEnv.activeController()); @@ -544,7 +545,7 @@ public void testUnregisterBroker() throws Throwable { setBrokerId(0). setClusterId(active.clusterId()). setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwBA")). - setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_6_IV2)). + setFeatures(brokerFeatures(MetadataVersion.IBP_3_0_IV1, MetadataVersion.IBP_3_7_IV0)). setListeners(listeners)); assertEquals(5L, reply.get().epoch()); CreateTopicsRequestData createTopicsRequestData = @@ -1491,6 +1492,7 @@ public void testActivationRecordsPartialTransactionNoSupport() { setAddingReplicas(Collections.emptyList()).setLeader(1).setLeaderEpoch(0). setPartitionEpoch(0), (short) 0) )); + @Test public void testFailoverDuringMigrationTransaction() throws Exception { try ( @@ -1534,4 +1536,62 @@ public void testFailoverDuringMigrationTransaction() throws Exception { } } + + @ParameterizedTest + @EnumSource(value = MetadataVersion.class, names = {"IBP_3_4_IV0", "IBP_3_5_IV0", "IBP_3_6_IV0", "IBP_3_6_IV1"}) + public void testBrokerHeartbeatDuringMigration(MetadataVersion metadataVersion) throws Exception { + try ( + LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(1).build(); + ) { + QuorumControllerTestEnv.Builder controlEnvBuilder = new QuorumControllerTestEnv.Builder(logEnv). + setControllerBuilderInitializer(controllerBuilder -> + controllerBuilder + .setZkMigrationEnabled(true) + .setMaxIdleIntervalNs(OptionalLong.of(TimeUnit.MILLISECONDS.toNanos(100))) + ). 
+ setBootstrapMetadata(BootstrapMetadata.fromVersion(metadataVersion, "test")); + QuorumControllerTestEnv controlEnv = controlEnvBuilder.build(); + QuorumController active = controlEnv.activeController(true); + + // Register a ZK broker + BrokerRegistrationReply reply = active.registerBroker(ANONYMOUS_CONTEXT, + new BrokerRegistrationRequestData(). + setBrokerId(0). + setRack(null). + setClusterId(active.clusterId()). + setIsMigratingZkBroker(true). + setFeatures(brokerFeatures(metadataVersion, metadataVersion)). + setIncarnationId(Uuid.fromString("kxAT73dKQsitIedpiPtwB0")). + setListeners(new ListenerCollection(Arrays.asList(new Listener(). + setName("PLAINTEXT").setHost("localhost"). + setPort(9092)).iterator()))).get(); + + // Start migration + ZkRecordConsumer migrationConsumer = active.zkRecordConsumer(); + migrationConsumer.beginMigration().get(30, TimeUnit.SECONDS); + + // Interleave migration batches with heartbeats. Ensure the heartbeat events use the correct + // offset when adding to the purgatory. Otherwise, we get errors like: + // There is already a deferred event with offset 292. We should not add one with an offset of 241 which is lower than that. + for (int i = 0; i < 100; i++) { + Uuid topicId = Uuid.randomUuid(); + String topicName = "testBrokerHeartbeatDuringMigration" + i; + Future migrationFuture = migrationConsumer.acceptBatch( + Arrays.asList( + new ApiMessageAndVersion(new TopicRecord().setTopicId(topicId).setName(topicName), (short) 0), + new ApiMessageAndVersion(new PartitionRecord().setTopicId(topicId).setPartitionId(0).setIsr(Arrays.asList(0, 1, 2)), (short) 0))); + active.processBrokerHeartbeat(ANONYMOUS_CONTEXT, new BrokerHeartbeatRequestData(). + setWantFence(false).setBrokerEpoch(reply.epoch()).setBrokerId(0). + setCurrentMetadataOffset(100000L + i)); + migrationFuture.get(); + } + + // Ensure that we can complete a heartbeat even though we leave migration transaction hanging + assertEquals(new BrokerHeartbeatReply(true, false, false, false), + active.processBrokerHeartbeat(ANONYMOUS_CONTEXT, new BrokerHeartbeatRequestData(). + setWantFence(false).setBrokerEpoch(reply.epoch()).setBrokerId(0). 
+ setCurrentMetadataOffset(100100L)).get()); + + } + } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java index 0ce1fb7b5b4bf..4750fb61faeac 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTestEnv.java @@ -105,7 +105,7 @@ private QuorumControllerTestEnv( builder.setRaftClient(logEnv.logManagers().get(nodeId)); builder.setBootstrapMetadata(bootstrapMetadata); builder.setLeaderImbalanceCheckIntervalNs(leaderImbalanceCheckIntervalNs); - builder.setQuorumFeatures(new QuorumFeatures(nodeId, apiVersions, QuorumFeatures.defaultFeatureMap(), nodeIds)); + builder.setQuorumFeatures(new QuorumFeatures(nodeId, QuorumFeatures.defaultFeatureMap(), nodeIds)); sessionTimeoutMillis.ifPresent(timeout -> { builder.setSessionTimeoutNs(NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS)); }); diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java index 4e669aecafc00..10ebf9c574f69 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumFeaturesTest.java @@ -17,23 +17,20 @@ package org.apache.kafka.controller; -import org.apache.kafka.clients.ApiVersions; -import org.apache.kafka.clients.NodeApiVersions; -import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.VersionRange; +import org.apache.kafka.server.common.MetadataVersion; import org.junit.jupiter.api.Test; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Optional; -import static java.util.Collections.emptyMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -41,92 +38,92 @@ public class QuorumFeaturesTest { private final static Map LOCAL; + private final static QuorumFeatures QUORUM_FEATURES; + static { Map local = new HashMap<>(); local.put("foo", VersionRange.of(0, 3)); local.put("bar", VersionRange.of(0, 4)); local.put("baz", VersionRange.of(2, 2)); LOCAL = Collections.unmodifiableMap(local); - } - - @Test - public void testDefaultSupportedLevels() { - QuorumFeatures quorumFeatures = new QuorumFeatures(0, new ApiVersions(), emptyMap(), Arrays.asList(0, 1, 2)); - assertEquals(Optional.empty(), quorumFeatures.reasonNotSupported("foo", (short) 0)); - assertEquals(Optional.of("Local controller 0 does not support this feature."), - quorumFeatures.reasonNotSupported("foo", (short) 1)); + QUORUM_FEATURES = new QuorumFeatures(0, LOCAL, Arrays.asList(0, 1, 2)); } @Test public void testLocalSupportedFeature() { - QuorumFeatures quorumFeatures = new QuorumFeatures(0, new ApiVersions(), LOCAL, Arrays.asList(0, 1, 2)); - assertEquals(VersionRange.of(0, 3), quorumFeatures.localSupportedFeature("foo")); - 
assertEquals(VersionRange.of(0, 4), quorumFeatures.localSupportedFeature("bar")); - assertEquals(VersionRange.of(2, 2), quorumFeatures.localSupportedFeature("baz")); - assertEquals(VersionRange.of(0, 0), quorumFeatures.localSupportedFeature("quux")); + assertEquals(VersionRange.of(0, 3), QUORUM_FEATURES.localSupportedFeature("foo")); + assertEquals(VersionRange.of(0, 4), QUORUM_FEATURES.localSupportedFeature("bar")); + assertEquals(VersionRange.of(2, 2), QUORUM_FEATURES.localSupportedFeature("baz")); + assertEquals(VersionRange.of(0, 0), QUORUM_FEATURES.localSupportedFeature("quux")); } @Test public void testReasonNotSupported() { - ApiVersions apiVersions = new ApiVersions(); - QuorumFeatures quorumFeatures = new QuorumFeatures(0, apiVersions, LOCAL, Arrays.asList(0, 1, 2)); assertEquals(Optional.of("Local controller 0 only supports versions 0-3"), - quorumFeatures.reasonNotSupported("foo", (short) 10)); - apiVersions.update("1", nodeApiVersions(Arrays.asList( - new SimpleImmutableEntry<>("foo", VersionRange.of(1, 3)), - new SimpleImmutableEntry<>("bar", VersionRange.of(1, 3)), - new SimpleImmutableEntry<>("baz", VersionRange.of(1, 2))))); - assertEquals(Optional.empty(), quorumFeatures.reasonNotSupported("bar", (short) 3)); - assertEquals(Optional.of("Controller 1 only supports versions 1-3"), - quorumFeatures.reasonNotSupported("bar", (short) 4)); - } - - private static NodeApiVersions nodeApiVersions(List> entries) { - List features = new ArrayList<>(); - entries.forEach(entry -> { - features.add(new SupportedFeatureKey(). - setName(entry.getKey()). - setMinVersion(entry.getValue().min()). - setMaxVersion(entry.getValue().max())); - }); - return new NodeApiVersions(Collections.emptyList(), features, false); + QuorumFeatures.reasonNotSupported((short) 10, + "Local controller 0", VersionRange.of(0, 3))); + assertEquals(Optional.empty(), + QuorumFeatures.reasonNotSupported((short) 3, + "Local controller 0", VersionRange.of(0, 3))); } @Test public void testIsControllerId() { - QuorumFeatures quorumFeatures = new QuorumFeatures(0, new ApiVersions(), LOCAL, Arrays.asList(0, 1, 2)); - assertTrue(quorumFeatures.isControllerId(0)); - assertTrue(quorumFeatures.isControllerId(1)); - assertTrue(quorumFeatures.isControllerId(2)); - assertFalse(quorumFeatures.isControllerId(3)); + assertTrue(QUORUM_FEATURES.isControllerId(0)); + assertTrue(QUORUM_FEATURES.isControllerId(1)); + assertTrue(QUORUM_FEATURES.isControllerId(2)); + assertFalse(QUORUM_FEATURES.isControllerId(3)); } @Test - public void testZkMigrationReady() { - ApiVersions apiVersions = new ApiVersions(); - QuorumFeatures quorumFeatures = new QuorumFeatures(0, apiVersions, LOCAL, Arrays.asList(0, 1, 2)); - - // create apiVersion with zkMigrationEnabled flag set for node 0, the other 2 nodes have no apiVersions info - apiVersions.update("0", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().isPresent()); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().get().contains("Missing apiVersion from nodes: [1, 2]")); + public void testZkMigrationNotReadyIfMetadataVersionTooLow() { + assertEquals(Optional.of("Metadata version too low at 3.0-IV1"), + QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( + MetadataVersion.IBP_3_0_IV1, Collections.emptyMap())); + } - // create apiVersion with zkMigrationEnabled flag set for node 1, the other 1 node have no apiVersions info - apiVersions.update("1", new 
NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().isPresent()); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().get().contains("Missing apiVersion from nodes: [2]")); + @Test + public void testZkMigrationReadyIfControllerRegistrationNotSupported() { + assertEquals(Optional.empty(), + QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( + MetadataVersion.IBP_3_4_IV0, Collections.emptyMap())); + } - // create apiVersion with zkMigrationEnabled flag disabled for node 2, should still be not ready - apiVersions.update("2", NodeApiVersions.create()); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().isPresent()); - assertTrue(quorumFeatures.reasonAllControllersZkMigrationNotReady().get().contains("Nodes don't enable `zookeeper.metadata.migration.enable`: [2]")); + @Test + public void testZkMigrationNotReadyIfNotAllControllersRegistered() { + assertEquals(Optional.of("No registration found for controller 0"), + QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( + MetadataVersion.IBP_3_7_IV0, Collections.emptyMap())); + } - // update zkMigrationEnabled flag to enabled for node 2, should be ready now - apiVersions.update("2", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - assertFalse(quorumFeatures.reasonAllControllersZkMigrationNotReady().isPresent()); + @Test + public void testZkMigrationNotReadyIfControllerNotReady() { + assertEquals(Optional.of("Controller 0 has not enabled zookeeper.metadata.migration.enable"), + QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( + MetadataVersion.IBP_3_7_IV0, Collections.singletonMap(0, + new ControllerRegistration.Builder(). + setId(0). + setZkMigrationReady(false). + setIncarnationId(Uuid.fromString("kCBJaDGNQk6x3y5xbtQOpg")). + setListeners(Collections.singletonMap("CONTROLLER", + new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093))). + build()))); + } - // create apiVersion with zkMigrationEnabled flag disabled for a non-controller, and expect we fill filter it out - apiVersions.update("3", NodeApiVersions.create()); - assertFalse(quorumFeatures.reasonAllControllersZkMigrationNotReady().isPresent()); + @Test + public void testZkMigrationReadyIfAllControllersReady() { + Map controllers = new HashMap<>(); + QUORUM_FEATURES.quorumNodeIds().forEach(id -> { + controllers.put(id, + new ControllerRegistration.Builder(). + setId(id). + setZkMigrationReady(true). + setIncarnationId(Uuid.fromString("kCBJaDGNQk6x3y5xbtQOpg")). + setListeners(Collections.singletonMap("CONTROLLER", + new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093))). 
+ build()); + }); + assertEquals(Optional.empty(), QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( + MetadataVersion.IBP_3_7_IV0, controllers)); } } diff --git a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java index 41bffdba92fc6..1cd2a2d425836 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.controller; -import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.common.ElectionType; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -207,7 +206,7 @@ private ReplicationControlTestContext( this.time = time; this.featureControl = new FeatureControlManager.Builder(). setSnapshotRegistry(snapshotRegistry). - setQuorumFeatures(new QuorumFeatures(0, new ApiVersions(), + setQuorumFeatures(new QuorumFeatures(0, QuorumFeatures.defaultFeatureMap(), Collections.singletonList(0))). setMetadataVersion(metadataVersion). diff --git a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java index e12e1143c8812..75c3c39428b02 100644 --- a/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/ClusterImageTest.java @@ -21,6 +21,9 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.FenceBrokerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerEndpoint; +import org.apache.kafka.common.metadata.RegisterControllerRecord.ControllerEndpointCollection; import org.apache.kafka.common.metadata.UnfenceBrokerRecord; import org.apache.kafka.common.metadata.UnregisterBrokerRecord; import org.apache.kafka.common.security.auth.SecurityProtocol; @@ -28,6 +31,7 @@ import org.apache.kafka.image.writer.RecordListWriter; import org.apache.kafka.metadata.BrokerRegistration; import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange; +import org.apache.kafka.metadata.ControllerRegistration; import org.apache.kafka.metadata.RecordTestUtils; import org.apache.kafka.metadata.VersionRange; import org.apache.kafka.server.common.ApiMessageAndVersion; @@ -46,7 +50,6 @@ import static org.apache.kafka.common.metadata.MetadataRecordType.FENCE_BROKER_RECORD; import static org.apache.kafka.common.metadata.MetadataRecordType.UNFENCE_BROKER_RECORD; -import static org.apache.kafka.common.metadata.MetadataRecordType.UNREGISTER_BROKER_RECORD; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -88,7 +91,15 @@ public class ClusterImageTest { Optional.of("arack"), false, false)); - IMAGE1 = new ClusterImage(map1); + Map cmap1 = new HashMap<>(); + cmap1.put(1000, new ControllerRegistration.Builder(). + setId(1000). + setIncarnationId(Uuid.fromString("9ABu6HEgRuS-hjHLgC4cHw")). + setZkMigrationReady(false). + setListeners(Collections.singletonMap("PLAINTEXT", + new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 19092))). 
+ setSupportedFeatures(Collections.emptyMap()).build()); + IMAGE1 = new ClusterImage(map1, cmap1); DELTA1_RECORDS = new ArrayList<>(); DELTA1_RECORDS.add(new ApiMessageAndVersion(new UnfenceBrokerRecord(). @@ -97,11 +108,24 @@ public class ClusterImageTest { setId(1).setEpoch(1001), FENCE_BROKER_RECORD.highestSupportedVersion())); DELTA1_RECORDS.add(new ApiMessageAndVersion(new BrokerRegistrationChangeRecord(). setBrokerId(0).setBrokerEpoch(1000).setInControlledShutdown( - BrokerRegistrationInControlledShutdownChange.IN_CONTROLLED_SHUTDOWN.value()), - FENCE_BROKER_RECORD.highestSupportedVersion())); + BrokerRegistrationInControlledShutdownChange.IN_CONTROLLED_SHUTDOWN.value()), + (short) 0)); DELTA1_RECORDS.add(new ApiMessageAndVersion(new UnregisterBrokerRecord(). setBrokerId(2).setBrokerEpoch(123), - UNREGISTER_BROKER_RECORD.highestSupportedVersion())); + (short) 0)); + + ControllerEndpointCollection endpointsFor1001 = new ControllerEndpointCollection(); + new ControllerEndpointCollection().add(new ControllerEndpoint(). + setHost("localhost"). + setName("PLAINTEXT"). + setPort(19093). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)); + DELTA1_RECORDS.add(new ApiMessageAndVersion(new RegisterControllerRecord(). + setControllerId(1001). + setIncarnationId(Uuid.fromString("FdEHF-IqScKfYyjZ1CjfNQ")). + setZkMigrationReady(true). + setEndPoints(endpointsFor1001), + (short) 0)); DELTA1 = new ClusterDelta(IMAGE1); RecordTestUtils.replayAll(DELTA1, DELTA1_RECORDS); @@ -123,7 +147,15 @@ public class ClusterImageTest { Optional.empty(), true, false)); - IMAGE2 = new ClusterImage(map2); + Map cmap2 = new HashMap<>(cmap1); + cmap2.put(1001, new ControllerRegistration.Builder(). + setId(1001). + setIncarnationId(Uuid.fromString("FdEHF-IqScKfYyjZ1CjfNQ")). + setZkMigrationReady(true). + setListeners(Collections.singletonMap("PLAINTEXT", + new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 19093))). + setSupportedFeatures(Collections.emptyMap()).build()); + IMAGE2 = new ClusterImage(map2, cmap2); } @Test diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java new file mode 100644 index 0000000000000..b25f7f6011020 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageBrokersNodeTest.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image.node; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.image.ClusterImage; +import org.apache.kafka.metadata.BrokerRegistration; +import org.apache.kafka.metadata.VersionRange; +import org.apache.kafka.server.common.MetadataVersion; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + + +@Timeout(value = 40) +public class ClusterImageBrokersNodeTest { + private static final ClusterImage TEST_IMAGE = new ClusterImage( + Collections.singletonMap(1, new BrokerRegistration(1, + 1001, + Uuid.fromString("MJkaH0j0RwuC3W2GHQHtWA"), + Collections.emptyList(), + Collections.singletonMap(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 4)), + Optional.empty(), + false, + false)), + Collections.emptyMap()); + + private final static ClusterImageBrokersNode NODE = new ClusterImageBrokersNode(TEST_IMAGE); + + @Test + public void testChildNames() { + assertEquals(Arrays.asList("1"), NODE.childNames()); + } + + @Test + public void testNode1Child() { + MetadataNode child = NODE.child("1"); + assertNotNull(child); + assertEquals("BrokerRegistration(id=1, epoch=1001, " + + "incarnationId=MJkaH0j0RwuC3W2GHQHtWA, " + + "listeners=[], " + + "supportedFeatures={metadata.version: 1-4}, " + + "rack=Optional.empty, " + + "fenced=false, " + + "inControlledShutdown=false, " + + "isMigratingZkBroker=false)", child.stringify()); + } + + @Test + public void testUnknownChild() { + assertNull(NODE.child("2")); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java new file mode 100644 index 0000000000000..3d347ec3178f4 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageControllersNodeTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image.node; + +import org.apache.kafka.common.Uuid; +import org.apache.kafka.image.ClusterImage; +import org.apache.kafka.metadata.ControllerRegistration; +import org.apache.kafka.metadata.VersionRange; +import org.apache.kafka.server.common.MetadataVersion; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.Arrays; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + + +@Timeout(value = 40) +public class ClusterImageControllersNodeTest { + private static final ClusterImage TEST_IMAGE = new ClusterImage( + Collections.emptyMap(), + Collections.singletonMap(2, new ControllerRegistration.Builder(). + setId(2). + setIncarnationId(Uuid.fromString("adGo6sTPS0uJshjvdTUmqQ")). + setZkMigrationReady(false). + setSupportedFeatures(Collections.singletonMap( + MetadataVersion.FEATURE_NAME, VersionRange.of(1, 4))). + setListeners(Collections.emptyMap()). + build())); + + private final static ClusterImageControllersNode NODE = new ClusterImageControllersNode(TEST_IMAGE); + + @Test + public void testChildNames() { + assertEquals(Arrays.asList("2"), NODE.childNames()); + } + + @Test + public void testNode1Child() { + MetadataNode child = NODE.child("2"); + assertNotNull(child); + assertEquals("ControllerRegistration(id=2, " + + "incarnationId=adGo6sTPS0uJshjvdTUmqQ, " + + "zkMigrationReady=false, " + + "listeners=[], " + + "supportedFeatures={metadata.version: 1-4})", + child.stringify()); + } + + @Test + public void testUnknownChild() { + assertNull(NODE.child("1")); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageNodeTest.java b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageNodeTest.java new file mode 100644 index 0000000000000..f56e13100b8e9 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/node/ClusterImageNodeTest.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image.node; + +import org.apache.kafka.image.ClusterImage; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + + +@Timeout(value = 40) +public class ClusterImageNodeTest { + private final static ClusterImageNode NODE = new ClusterImageNode(ClusterImage.EMPTY); + + @Test + public void testChildNames() { + assertEquals(Arrays.asList("brokers", "controllers"), NODE.childNames()); + } + + @Test + public void testBrokersChild() { + MetadataNode child = NODE.child("brokers"); + assertNotNull(child); + assertEquals(ClusterImageBrokersNode.class, child.getClass()); + } + + @Test + public void testControllersChild() { + MetadataNode child = NODE.child("controllers"); + assertNotNull(child); + assertEquals(ClusterImageControllersNode.class, child.getClass()); + } + + @Test + public void testUnknownChild() { + assertNull(NODE.child("unknown")); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java new file mode 100644 index 0000000000000..4df46a2e8db84 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.image.publisher; + +import org.apache.kafka.common.metadata.FeatureLevelRecord; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.MetadataProvenance; +import org.apache.kafka.image.loader.LogDeltaManifest; +import org.apache.kafka.image.loader.SnapshotManifest; +import org.apache.kafka.metadata.RecordTestUtils; +import org.apache.kafka.raft.LeaderAndEpoch; +import org.apache.kafka.server.common.MetadataVersion; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.OptionalInt; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +@Timeout(value = 40) +public class ControllerRegistrationsPublisherTest { + @Test + public void testInitialControllers() { + ControllerRegistrationsPublisher publisher = new ControllerRegistrationsPublisher(); + assertEquals(Collections.emptyMap(), publisher.controllers()); + } + + @Test + public void testName() { + ControllerRegistrationsPublisher publisher = new ControllerRegistrationsPublisher(); + assertEquals("ControllerRegistrationsPublisher", publisher.name()); + } + + private static final MetadataDelta TEST_DELTA; + + private static final MetadataImage TEST_IMAGE; + + private static final MetadataProvenance PROVENANCE = new MetadataProvenance(100L, 10, 2000L); + + static { + TEST_DELTA = new MetadataDelta.Builder().build(); + TEST_DELTA.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel())); + TEST_DELTA.replay(RecordTestUtils.createTestControllerRegistration(0, true)); + TEST_DELTA.replay(RecordTestUtils.createTestControllerRegistration(1, false)); + TEST_DELTA.replay(RecordTestUtils.createTestControllerRegistration(2, false)); + TEST_IMAGE = TEST_DELTA.apply(PROVENANCE); + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testOnMetadataUpdate(boolean fromSnapshot) { + ControllerRegistrationsPublisher publisher = new ControllerRegistrationsPublisher(); + if (fromSnapshot) { + publisher.onMetadataUpdate(TEST_DELTA, TEST_IMAGE, + new SnapshotManifest(new MetadataProvenance(100L, 10, 2000L), 100L)); + } else { + publisher.onMetadataUpdate(TEST_DELTA, TEST_IMAGE, + LogDeltaManifest.newBuilder(). + provenance(PROVENANCE). + leaderAndEpoch(new LeaderAndEpoch(OptionalInt.of(1), 200)). + numBatches(3). + elapsedNs(1000L). + numBytes(234). 
+ build()); + } + System.out.println("TEST_IMAGE.cluster = " + TEST_IMAGE.cluster()); + assertEquals(new HashSet<>(Arrays.asList(0, 1, 2)), publisher.controllers().keySet()); + assertTrue(publisher.controllers().get(0).zkMigrationReady()); + assertFalse(publisher.controllers().get(1).zkMigrationReady()); + assertFalse(publisher.controllers().get(2).zkMigrationReady()); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ControllerRegistrationTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ControllerRegistrationTest.java new file mode 100644 index 0000000000000..ff1b6328a5f47 --- /dev/null +++ b/metadata/src/test/java/org/apache/kafka/metadata/ControllerRegistrationTest.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.metadata; + +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.RegisterControllerRecord; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.image.writer.ImageWriterOptions; +import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.MetadataVersion; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + + +@Timeout(value = 40) +public class ControllerRegistrationTest { + static Map doubleMap(K k1, V v1, K k2, V v2) { + HashMap map = new HashMap<>(); + map.put(k1, v1); + map.put(k2, v2); + return Collections.unmodifiableMap(map); + } + + private static final List REGISTRATIONS = Arrays.asList( + new ControllerRegistration.Builder(). + setId(0). + setIncarnationId(Uuid.fromString("ycRmGrOFQru7HXf6fOybZQ")). + setZkMigrationReady(true). + setListeners(doubleMap( + "PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9107), + "SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9207))). + setSupportedFeatures(Collections.singletonMap(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))). + build(), + new ControllerRegistration.Builder(). + setId(1). + setIncarnationId(Uuid.fromString("ubT_wuD6R3uopZ_lV76dQg")). + setZkMigrationReady(true). + setListeners(doubleMap( + "PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9108), + "SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9208))). + setSupportedFeatures(Collections.singletonMap(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))). 
+ build(), + new ControllerRegistration.Builder(). + setId(2). + setIncarnationId(Uuid.fromString("muQS341gRIeNh9Ps7reDSw")). + setZkMigrationReady(false). + setListeners(doubleMap( + "PLAINTEXT", new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9109), + "SSL", new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9209))). + setSupportedFeatures(Collections.singletonMap(MetadataVersion.FEATURE_NAME, VersionRange.of(1, 10))). + build() + ); + + @Test + public void testValues() { + assertEquals(0, REGISTRATIONS.get(0).id()); + assertEquals(1, REGISTRATIONS.get(1).id()); + assertEquals(2, REGISTRATIONS.get(2).id()); + } + + @Test + public void testEquals() { + assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(1)); + assertNotEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(0)); + assertNotEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(2)); + assertNotEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(0)); + assertEquals(REGISTRATIONS.get(0), REGISTRATIONS.get(0)); + assertEquals(REGISTRATIONS.get(1), REGISTRATIONS.get(1)); + assertEquals(REGISTRATIONS.get(2), REGISTRATIONS.get(2)); + } + + @Test + public void testToString() { + assertEquals("ControllerRegistration(id=1, " + + "incarnationId=ubT_wuD6R3uopZ_lV76dQg, " + + "zkMigrationReady=true, " + + "listeners=[" + + "Endpoint(listenerName='PLAINTEXT', securityProtocol=PLAINTEXT, host='localhost', port=9108), " + + "Endpoint(listenerName='SSL', securityProtocol=SSL, host='localhost', port=9208)]" + + ", supportedFeatures={metadata.version: 1-10})", + REGISTRATIONS.get(1).toString()); + } + + @Test + public void testFromRecordAndToRecord() { + testRoundTrip(REGISTRATIONS.get(0)); + testRoundTrip(REGISTRATIONS.get(1)); + testRoundTrip(REGISTRATIONS.get(2)); + } + + private void testRoundTrip(ControllerRegistration registration) { + ApiMessageAndVersion messageAndVersion = registration. + toRecord(new ImageWriterOptions.Builder().build()); + ControllerRegistration registration2 = new ControllerRegistration.Builder( + (RegisterControllerRecord) messageAndVersion.message()).build(); + assertEquals(registration, registration2); + ApiMessageAndVersion messageAndVersion2 = registration2. 
+ toRecord(new ImageWriterOptions.Builder().build()); + assertEquals(messageAndVersion, messageAndVersion2); + } + + @Test + public void testToNode() { + assertEquals(Optional.empty(), REGISTRATIONS.get(0).node("NONEXISTENT")); + assertEquals(Optional.of(new Node(0, "localhost", 9107, null)), + REGISTRATIONS.get(0).node("PLAINTEXT")); + assertEquals(Optional.of(new Node(0, "localhost", 9207, null)), + REGISTRATIONS.get(0).node("SSL")); + } +} diff --git a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java index ef5aada9f5a05..14f626e93ebe8 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java @@ -18,21 +18,25 @@ package org.apache.kafka.metadata; import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.metadata.RegisterControllerRecord; import org.apache.kafka.common.metadata.TopicRecord; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.ObjectSerializationCache; +import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.raft.Batch; import org.apache.kafka.raft.BatchReader; import org.apache.kafka.raft.internals.MemoryBatchReader; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.util.MockRandom; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -316,4 +320,36 @@ public static ApiMessageAndVersion testRecord(int index) { new TopicRecord().setName("test" + index). setTopicId(new Uuid(random.nextLong(), random.nextLong())), (short) 0); } + + public static RegisterControllerRecord createTestControllerRegistration( + int id, + boolean zkMigrationReady + ) { + return new RegisterControllerRecord(). + setControllerId(id). + setIncarnationId(new Uuid(3465346L, id)). + setZkMigrationReady(zkMigrationReady). + setEndPoints(new RegisterControllerRecord.ControllerEndpointCollection( + Arrays.asList( + new RegisterControllerRecord.ControllerEndpoint(). + setName("CONTROLLER"). + setHost("localhost"). + setPort(8000 + id). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id), + new RegisterControllerRecord.ControllerEndpoint(). + setName("CONTROLLER_SSL"). + setHost("localhost"). + setPort(9000 + id). + setSecurityProtocol(SecurityProtocol.SSL.id) + ).iterator() + )). + setFeatures(new RegisterControllerRecord.ControllerFeatureCollection( + Arrays.asList( + new RegisterControllerRecord.ControllerFeature(). + setName(MetadataVersion.FEATURE_NAME). + setMinSupportedVersion(MetadataVersion.MINIMUM_KRAFT_VERSION.featureLevel()). 
+ setMaxSupportedVersion(MetadataVersion.IBP_3_6_IV1.featureLevel()) + ).iterator() + )); + } } diff --git a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java index d2c1323720881..1bca3e2f2b2e8 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/migration/KRaftMigrationDriverTest.java @@ -16,14 +16,12 @@ */ package org.apache.kafka.metadata.migration; -import org.apache.kafka.clients.ApiVersions; -import org.apache.kafka.clients.NodeApiVersions; -import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord; import org.apache.kafka.common.metadata.ConfigRecord; +import org.apache.kafka.common.metadata.FeatureLevelRecord; import org.apache.kafka.common.metadata.RegisterBrokerRecord; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -49,10 +47,10 @@ import org.apache.kafka.raft.LeaderAndEpoch; import org.apache.kafka.raft.OffsetAndEpoch; import org.apache.kafka.server.common.ApiMessageAndVersion; +import org.apache.kafka.server.common.MetadataVersion; import org.apache.kafka.server.fault.MockFaultHandler; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -83,16 +81,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue; public class KRaftMigrationDriverTest { - List controllerNodes = Arrays.asList( - new Node(4, "host4", 0), - new Node(5, "host5", 0), - new Node(6, "host6", 0) - ); - ApiVersions apiVersions = new ApiVersions(); - QuorumFeatures quorumFeatures = QuorumFeatures.create(4, - apiVersions, + private final static QuorumFeatures QUORUM_FEATURES = new QuorumFeatures(4, QuorumFeatures.defaultFeatureMap(), - controllerNodes); + Arrays.asList(4, 5, 6)); static class MockControllerMetrics extends QuorumControllerMetrics { final AtomicBoolean closed = new AtomicBoolean(false); @@ -126,19 +117,12 @@ KRaftMigrationDriver.Builder defaultTestBuilder() { .setZkRecordConsumer(new NoOpRecordConsumer()) .setInitialZkLoadHandler(metadataPublisher -> { }) .setFaultHandler(new MockFaultHandler("test")) - .setQuorumFeatures(quorumFeatures) + .setQuorumFeatures(QUORUM_FEATURES) .setConfigSchema(KafkaConfigSchema.EMPTY) .setControllerMetrics(metrics) .setTime(mockTime); } - @BeforeEach - public void setup() { - apiVersions.update("4", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - apiVersions.update("5", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - apiVersions.update("6", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); - } - static class NoOpRecordConsumer implements ZkRecordConsumer { @Override public CompletableFuture beginMigration() { @@ -245,8 +229,9 @@ CompletableFuture enqueueMetadataChangeEventWithFuture( * Don't send RPCs to brokers for every metadata change, only when brokers or topics change. 
* This is a regression test for KAFKA-14668 */ - @Test - public void testOnlySendNeededRPCsToBrokers() throws Exception { + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testOnlySendNeededRPCsToBrokers(boolean registerControllers) throws Exception { CountingMetadataPropagator metadataPropagator = new CountingMetadataPropagator(); CapturingConfigMigrationClient configClient = new CapturingConfigMigrationClient(); CapturingMigrationClient migrationClient = CapturingMigrationClient.newBuilder() @@ -263,6 +248,7 @@ public void testOnlySendNeededRPCsToBrokers() throws Exception { MetadataDelta delta = new MetadataDelta(image); driver.start(); + setupDeltaForMigration(delta, registerControllers); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); delta.replay(zkBrokerRecord(1)); delta.replay(zkBrokerRecord(2)); @@ -343,6 +329,7 @@ public ZkMigrationLeadershipState claimControllerLeadership(ZkMigrationLeadershi try (KRaftMigrationDriver driver = builder.build()) { MetadataImage image = MetadataImage.EMPTY; MetadataDelta delta = new MetadataDelta(image); + setupDeltaForMigration(delta, true); driver.start(); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); @@ -369,11 +356,48 @@ public ZkMigrationLeadershipState claimControllerLeadership(ZkMigrationLeadershi } } - @Test - public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate() throws Exception { + private void setupDeltaForMigration( + MetadataDelta delta, + boolean registerControllers + ) { + if (registerControllers) { + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_7_IV0.featureLevel())); + for (int id : QUORUM_FEATURES.quorumNodeIds()) { + delta.replay(RecordTestUtils.createTestControllerRegistration(id, true)); + } + } else { + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). + setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel())); + } + } + + private void setupDeltaWithControllerRegistrations( + MetadataDelta delta, + List notReadyIds, + List readyIds + ) { + delta.replay(new FeatureLevelRecord(). + setName(MetadataVersion.FEATURE_NAME). 
+ setFeatureLevel(MetadataVersion.IBP_3_7_IV0.featureLevel())); + delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); + for (int id : notReadyIds) { + delta.replay(RecordTestUtils.createTestControllerRegistration(id, false)); + } + for (int id : readyIds) { + delta.replay(RecordTestUtils.createTestControllerRegistration(id, true)); + } + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate( + boolean allNodePresent + ) throws Exception { CountingMetadataPropagator metadataPropagator = new CountingMetadataPropagator(); CapturingMigrationClient migrationClient = CapturingMigrationClient.newBuilder().setBrokersInZk(1).build(); - apiVersions.remove("6"); KRaftMigrationDriver.Builder builder = defaultTestBuilder() .setZkMigrationClient(migrationClient) @@ -383,7 +407,11 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate() MetadataDelta delta = new MetadataDelta(image); driver.start(); - delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); + if (allNodePresent) { + setupDeltaWithControllerRegistrations(delta, Arrays.asList(4, 5, 6), Arrays.asList()); + } else { + setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5)); + } delta.replay(zkBrokerRecord(1)); MetadataProvenance provenance = new MetadataProvenance(100, 1, 1); image = delta.apply(provenance); @@ -393,16 +421,24 @@ public void testShouldNotMoveToNextStateIfControllerNodesAreNotReadyToMigrate() driver.onControllerChange(newLeader); driver.onMetadataUpdate(delta, image, logDeltaManifestBuilder(provenance, newLeader).build()); - // Current apiVersions are missing the controller node 6, should stay at WAIT_FOR_CONTROLLER_QUORUM state + // Not all controller nodes are ready. So we should stay at WAIT_FOR_CONTROLLER_QUORUM state. TestUtils.waitForCondition(() -> driver.migrationState().get(1, TimeUnit.MINUTES).equals(MigrationDriverState.WAIT_FOR_CONTROLLER_QUORUM), "Waiting for KRaftMigrationDriver to enter WAIT_FOR_CONTROLLER_QUORUM state"); - // Current apiVersions of node 6 has no zkMigrationReady set, should still stay at WAIT_FOR_CONTROLLER_QUORUM state - apiVersions.update("6", NodeApiVersions.create()); + // Controller nodes don't have zkMigrationReady set. Should still stay at WAIT_FOR_CONTROLLER_QUORUM state. assertEquals(MigrationDriverState.WAIT_FOR_CONTROLLER_QUORUM, driver.migrationState().get(1, TimeUnit.MINUTES)); - // all controller nodes are zkMigrationReady, should be able to move to next state - apiVersions.update("6", new NodeApiVersions(Collections.emptyList(), Collections.emptyList(), true)); + // Update so that all controller nodes are zkMigrationReady. Now we should be able to move to the next state. + delta = new MetadataDelta(image); + setupDeltaWithControllerRegistrations(delta, Arrays.asList(), Arrays.asList(4, 5, 6)); + image = delta.apply(new MetadataProvenance(200, 1, 2)); + driver.onMetadataUpdate(delta, image, new LogDeltaManifest.Builder(). + provenance(image.provenance()). + leaderAndEpoch(newLeader). + numBatches(1). + elapsedNs(100). + numBytes(42). 
+ build()); TestUtils.waitForCondition(() -> driver.migrationState().get(1, TimeUnit.MINUTES).equals(MigrationDriverState.DUAL_WRITE), "Waiting for KRaftMigrationDriver to enter DUAL_WRITE state"); } @@ -507,6 +543,7 @@ public void testTopicDualWriteSnapshot() throws Exception { MetadataDelta delta = new MetadataDelta(image); driver.start(); + setupDeltaForMigration(delta, true); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); delta.replay(zkBrokerRecord(0)); delta.replay(zkBrokerRecord(1)); @@ -560,6 +597,7 @@ public void testTopicDualWriteDelta() throws Exception { MetadataDelta delta = new MetadataDelta(image); driver.start(); + setupDeltaForMigration(delta, true); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); delta.replay(zkBrokerRecord(0)); delta.replay(zkBrokerRecord(1)); @@ -613,6 +651,7 @@ public void testControllerFailover() throws Exception { MetadataDelta delta = new MetadataDelta(image); driver.start(); + setupDeltaForMigration(delta, true); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); delta.replay(zkBrokerRecord(0)); delta.replay(zkBrokerRecord(1)); @@ -678,6 +717,7 @@ public CompletableFuture beginMigration() { MetadataDelta delta = new MetadataDelta(image); driver.start(); + setupDeltaForMigration(delta, true); delta.replay(ZkMigrationState.PRE_MIGRATION.toRecord().message()); delta.replay(zkBrokerRecord(1)); delta.replay(zkBrokerRecord(2)); diff --git a/raft/src/main/java/org/apache/kafka/raft/FileBasedStateStore.java b/raft/src/main/java/org/apache/kafka/raft/FileBasedStateStore.java index d567b15ece313..019fd1471790c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/FileBasedStateStore.java +++ b/raft/src/main/java/org/apache/kafka/raft/FileBasedStateStore.java @@ -64,6 +64,7 @@ public class FileBasedStateStore implements QuorumStateStore { private final File stateFile; static final String DATA_VERSION = "data_version"; + static final short HIGHEST_SUPPORTED_VERSION = 0; public FileBasedStateStore(final File stateFile) { this.stateFile = stateFile; @@ -144,21 +145,27 @@ private void writeElectionStateToFile(final File stateFile, QuorumStateData stat log.trace("Writing tmp quorum state {}", temp.getAbsolutePath()); - try (final FileOutputStream fileOutputStream = new FileOutputStream(temp); - final BufferedWriter writer = new BufferedWriter( - new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8))) { - short version = state.highestSupportedVersion(); - - ObjectNode jsonState = (ObjectNode) QuorumStateDataJsonConverter.write(state, version); - jsonState.set(DATA_VERSION, new ShortNode(version)); - writer.write(jsonState.toString()); - writer.flush(); - fileOutputStream.getFD().sync(); + try { + try (final FileOutputStream fileOutputStream = new FileOutputStream(temp); + final BufferedWriter writer = new BufferedWriter( + new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8) + ) + ) { + ObjectNode jsonState = (ObjectNode) QuorumStateDataJsonConverter.write(state, HIGHEST_SUPPORTED_VERSION); + jsonState.set(DATA_VERSION, new ShortNode(HIGHEST_SUPPORTED_VERSION)); + writer.write(jsonState.toString()); + writer.flush(); + fileOutputStream.getFD().sync(); + } Utils.atomicMoveWithFallback(temp.toPath(), stateFile.toPath()); } catch (IOException e) { throw new UncheckedIOException( - String.format("Error while writing the Quorum status from the file %s", - stateFile.getAbsolutePath()), e); + String.format( + "Error while writing the Quorum status from the file %s", + stateFile.getAbsolutePath() 
+ ), + e + ); } finally { // cleanup the temp file when the write finishes (either success or fail). deleteFileIfExists(temp); diff --git a/raft/src/test/java/org/apache/kafka/raft/FileBasedStateStoreTest.java b/raft/src/test/java/org/apache/kafka/raft/FileBasedStateStoreTest.java index 841991f8d63c1..66bb9c3a15d8e 100644 --- a/raft/src/test/java/org/apache/kafka/raft/FileBasedStateStoreTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/FileBasedStateStoreTest.java @@ -19,7 +19,9 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.protocol.types.TaggedFields; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.raft.generated.QuorumStateData; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -107,6 +109,20 @@ public void testCantReadVersionQuorumState() throws IOException { assertCantReadQuorumStateVersion(jsonString); } + @Test + public void testSupportedVersion() { + // If the next few checks fail, please check that they are compatible with previous releases of KRaft + + // Check that FileBasedStateStore supports the latest version + assertEquals(FileBasedStateStore.HIGHEST_SUPPORTED_VERSION, QuorumStateData.HIGHEST_SUPPORTED_VERSION); + // Check that the supported versions haven't changed + assertEquals(0, QuorumStateData.HIGHEST_SUPPORTED_VERSION); + assertEquals(0, QuorumStateData.LOWEST_SUPPORTED_VERSION); + // For the latest version check that the number of tagged fields hasn't changed + TaggedFields taggedFields = (TaggedFields) QuorumStateData.SCHEMA_0.get(6).def.type; + assertEquals(0, taggedFields.numFields()); + } + public void assertCantReadQuorumStateVersion(String jsonString) throws IOException { final File stateFile = TestUtils.tempFile(); stateStore = new FileBasedStateStore(stateFile); diff --git a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java index 92f064f290b04..44764e9c641b9 100644 --- a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java +++ b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java @@ -183,7 +183,10 @@ public enum MetadataVersion { IBP_3_6_IV1(13, "3.6", "IV1", true), // Add KRaft support for Delegation Tokens - IBP_3_6_IV2(14, "3.6", "IV2", true); + IBP_3_6_IV2(14, "3.6", "IV2", true), + + // Implement KIP-919 controller registration. 
+ IBP_3_7_IV0(15, "3.7", "IV0", true); // NOTES when adding a new version: // Update the default version in @ClusterTest annotation to point to the latest version @@ -320,6 +323,19 @@ public short registerBrokerRecordVersion() { } } + public short registerControllerRecordVersion() { + if (isAtLeast(MetadataVersion.IBP_3_7_IV0)) { + return (short) 0; + } else { + throw new RuntimeException("Controller registration is not supported in " + + "MetadataVersion " + this); + } + } + + public boolean isControllerRegistrationSupported() { + return this.isAtLeast(MetadataVersion.IBP_3_7_IV0); + } + public short fetchRequestVersion() { if (this.isAtLeast(IBP_3_5_IV1)) { return 15; diff --git a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.java b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.java index fa819979b2cba..9d06617bd669a 100644 --- a/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.java +++ b/storage/api/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteStorageManager.java @@ -121,15 +121,15 @@ InputStream fetchLogSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata, /** * Returns the index for the respective log segment of {@link RemoteLogSegmentMetadata}. + *
    + * Note: The transaction index may not exist because of no transactional records. + * In this case, it should still return an InputStream with empty content, instead of returning {@code null}. * * @param remoteLogSegmentMetadata metadata about the remote log segment. * @param indexType type of the index to be fetched for the segment. * @return input stream of the requested index. * @throws RemoteStorageException if there are any errors while fetching the index. * @throws RemoteResourceNotFoundException the requested index is not found in the remote storage - * (e.g. Transaction index may not exist because segments created prior to version 2.8.0 will not have transaction index associated with them.). - * The caller of this function are encouraged to re-create the indexes from the segment - * as the suggested way of handling this error if the index is expected to be existed. */ InputStream fetchIndex(RemoteLogSegmentMetadata remoteLogSegmentMetadata, IndexType indexType) throws RemoteStorageException; diff --git a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java index f636363971325..32dcfe3731183 100644 --- a/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java +++ b/storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java @@ -171,7 +171,7 @@ public final class RemoteLogManagerConfig { REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC) .define(REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP, STRING, null, - new ConfigDef.NonEmptyString(), + null, MEDIUM, REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC) .define(REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, @@ -183,7 +183,7 @@ public final class RemoteLogManagerConfig { .define(REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP, STRING, null, - new ConfigDef.NonEmptyString(), + null, MEDIUM, REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC) .define(REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, STRING, diff --git a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorage.java b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorage.java index 43c09ccd908eb..64131d155590b 100644 --- a/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorage.java +++ b/storage/src/test/java/org/apache/kafka/server/log/remote/storage/LocalTieredStorage.java @@ -561,4 +561,8 @@ private RemoteLogSegmentFileset.RemoteLogSegmentFileType getLogSegmentFileType(I } return SEGMENT; } + + public int brokerId() { + return brokerId; + } } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestBuilder.java b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestBuilder.java index ff489002b389d..5c28c33183fe9 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestBuilder.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestBuilder.java @@ -323,7 +323,7 @@ public TieredStorageTestBuilder deleteRecords(String topic, Integer partition, Long beforeOffset) { TopicPartition topicPartition = new TopicPartition(topic, partition); - actions.add(new DeleteRecordsAction(topicPartition, beforeOffset)); + actions.add(new DeleteRecordsAction(topicPartition, beforeOffset, buildDeleteSegmentSpecList(topic))); return this; } @@ -377,6 +377,10 @@ private ProducableSpec getOrCreateProducable(String topic, private DeleteTopicAction 
buildDeleteTopicAction(String topic, Boolean shouldDelete) { + return new DeleteTopicAction(topic, buildDeleteSegmentSpecList(topic), shouldDelete); + } + + private List buildDeleteSegmentSpecList(String topic) { List deleteSegmentSpecList = deletables.entrySet() .stream() .filter(e -> e.getKey().topic().equals(topic)) @@ -389,7 +393,7 @@ private DeleteTopicAction buildDeleteTopicAction(String topic, }) .collect(Collectors.toList()); deleteSegmentSpecList.forEach(spec -> deletables.remove(spec.getTopicPartition())); - return new DeleteTopicAction(topic, deleteSegmentSpecList, shouldDelete); + return deleteSegmentSpecList; } } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java index 1975a1690cfc3..59acae74ad3f9 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/TieredStorageTestContext.java @@ -271,11 +271,18 @@ public TopicSpec topicSpec(String topicName) { public LocalTieredStorageSnapshot takeTieredStorageSnapshot() { int aliveBrokerId = harness.aliveBrokers().head().config().brokerId(); - return LocalTieredStorageSnapshot.takeSnapshot(remoteStorageManagers.get(aliveBrokerId)); + return LocalTieredStorageSnapshot.takeSnapshot(remoteStorageManager(aliveBrokerId)); } public LocalTieredStorageHistory tieredStorageHistory(int brokerId) { - return remoteStorageManagers.get(brokerId).getHistory(); + return remoteStorageManager(brokerId).getHistory(); + } + + public LocalTieredStorage remoteStorageManager(int brokerId) { + return remoteStorageManagers.stream() + .filter(rsm -> rsm.brokerId() == brokerId) + .findFirst() + .orElseThrow(() -> new IllegalArgumentException("No remote storage manager found for broker " + brokerId)); } public List remoteStorageManagers() { diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/DeleteRecordsAction.java b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/DeleteRecordsAction.java index 0f81d35a0523b..0f6a756c048b3 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/actions/DeleteRecordsAction.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/actions/DeleteRecordsAction.java @@ -16,36 +16,71 @@ */ package org.apache.kafka.tiered.storage.actions; +import org.apache.kafka.server.log.remote.storage.LocalTieredStorage; +import org.apache.kafka.server.log.remote.storage.LocalTieredStorageCondition; import org.apache.kafka.tiered.storage.TieredStorageTestAction; import org.apache.kafka.tiered.storage.TieredStorageTestContext; import org.apache.kafka.clients.admin.RecordsToDelete; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.tiered.storage.specs.RemoteDeleteSegmentSpec; import java.io.PrintStream; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static org.apache.kafka.server.log.remote.storage.LocalTieredStorageCondition.expectEvent; +import static org.apache.kafka.server.log.remote.storage.LocalTieredStorageEvent.EventType.DELETE_SEGMENT; public final class DeleteRecordsAction implements TieredStorageTestAction { + private static final int DELETE_WAIT_TIMEOUT_SEC = 10; private final TopicPartition partition; private final Long 
beforeOffset; + private final List deleteSegmentSpecs; public DeleteRecordsAction(TopicPartition partition, - Long beforeOffset) { + Long beforeOffset, + List deleteSegmentSpecs) { this.partition = partition; this.beforeOffset = beforeOffset; + this.deleteSegmentSpecs = deleteSegmentSpecs; } @Override - public void doExecute(TieredStorageTestContext context) throws InterruptedException, ExecutionException { + public void doExecute(TieredStorageTestContext context) + throws InterruptedException, ExecutionException, TimeoutException { + List tieredStorages = context.remoteStorageManagers(); + List tieredStorageConditions = deleteSegmentSpecs.stream() + .filter(spec -> spec.getEventType() == DELETE_SEGMENT) + .map(spec -> expectEvent( + tieredStorages, + spec.getEventType(), + spec.getSourceBrokerId(), + spec.getTopicPartition(), + false, + spec.getEventCount())) + .collect(Collectors.toList()); + Map recordsToDeleteMap = Collections.singletonMap(partition, RecordsToDelete.beforeOffset(beforeOffset)); context.admin().deleteRecords(recordsToDeleteMap).all().get(); + + if (!tieredStorageConditions.isEmpty()) { + tieredStorageConditions.stream() + .reduce(LocalTieredStorageCondition::and) + .get() + .waitUntilTrue(DELETE_WAIT_TIMEOUT_SEC, TimeUnit.SECONDS); + } } @Override public void describe(PrintStream output) { output.printf("delete-records partition: %s, before-offset: %d%n", partition, beforeOffset); + deleteSegmentSpecs.forEach(spec -> output.println(" " + spec)); } } diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseReassignReplicaTest.java b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseReassignReplicaTest.java index afac651303649..d0ccae89aab49 100644 --- a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseReassignReplicaTest.java +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/BaseReassignReplicaTest.java @@ -55,8 +55,8 @@ protected void writeTestSpecifications(TieredStorageTestBuilder builder) { final String topicA = "topicA"; final String topicB = "topicB"; final Integer p0 = 0; - final Integer partitionCount = 5; - final Integer replicationFactor = 2; + final Integer partitionCount = 1; + final Integer replicationFactor = 1; final Integer maxBatchCountPerSegment = 1; final Map> replicaAssignment = null; final boolean enableRemoteLogStorage = true; @@ -66,13 +66,15 @@ protected void writeTestSpecifications(TieredStorageTestBuilder builder) { } builder - // create topicA with 5 partitions, 2 RF and ensure that the user-topic-partitions are mapped to - // metadata partitions - .createTopic(topicA, partitionCount, replicationFactor, maxBatchCountPerSegment, + // create topicA with 50 partitions and 2 RF. Using 50 partitions to ensure that the user-partitions + // are mapped to all the __remote_log_metadata partitions. 
This is required to ensure that + // TBRLMM able to handle the assignment of the newly created replica to one of the already assigned + // metadata partition + .createTopic(topicA, 50, 2, maxBatchCountPerSegment, replicaAssignment, enableRemoteLogStorage) .expectUserTopicMappedToMetadataPartitions(topicA, metadataPartitions) // create topicB with 1 partition and 1 RF - .createTopic(topicB, 1, 1, maxBatchCountPerSegment, + .createTopic(topicB, partitionCount, replicationFactor, maxBatchCountPerSegment, mkMap(mkEntry(p0, Collections.singletonList(broker0))), enableRemoteLogStorage) // send records to partition 0 .expectSegmentToBeOffloaded(broker0, topicB, p0, 0, new KeyValueSpec("k0", "v0")) diff --git a/storage/src/test/java/org/apache/kafka/tiered/storage/integration/DeleteSegmentsDueToLogStartOffsetBreachTest.java b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/DeleteSegmentsDueToLogStartOffsetBreachTest.java new file mode 100644 index 0000000000000..5f10df5c68c4d --- /dev/null +++ b/storage/src/test/java/org/apache/kafka/tiered/storage/integration/DeleteSegmentsDueToLogStartOffsetBreachTest.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
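The revised fetchIndex Javadoc earlier in this patch changes the contract for a missing transaction index: implementations should hand back an empty stream rather than null or a RemoteResourceNotFoundException. A minimal sketch of a compliant plugin, assuming a hypothetical resolveIndexPath helper that maps a segment and index type to a local file (this is illustrative, not code from the patch):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
    import org.apache.kafka.server.log.remote.storage.RemoteStorageException;
    import org.apache.kafka.server.log.remote.storage.RemoteStorageManager;

    // Illustrative sketch, not part of this patch: only fetchIndex is shown.
    public abstract class ExampleRemoteStorageManager implements RemoteStorageManager {

        // Hypothetical helper: maps a remote segment and index type to a local file.
        protected abstract Path resolveIndexPath(RemoteLogSegmentMetadata metadata, IndexType indexType);

        @Override
        public InputStream fetchIndex(RemoteLogSegmentMetadata metadata, IndexType indexType)
                throws RemoteStorageException {
            Path path = resolveIndexPath(metadata, indexType);
            if (indexType == IndexType.TRANSACTION && !Files.exists(path)) {
                // No transactional records were written for this segment, so the index
                // was never created: return empty content instead of null or an exception.
                return new ByteArrayInputStream(new byte[0]);
            }
            try {
                return Files.newInputStream(path);
            } catch (IOException e) {
                throw new RemoteStorageException("Failed to read index " + path, e);
            }
        }
    }
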
+ */ +package org.apache.kafka.tiered.storage.integration; + +import org.apache.kafka.tiered.storage.TieredStorageTestBuilder; +import org.apache.kafka.tiered.storage.TieredStorageTestHarness; +import org.apache.kafka.tiered.storage.specs.KeyValueSpec; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.apache.kafka.server.log.remote.storage.LocalTieredStorageEvent.EventType.DELETE_SEGMENT; +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; + +public final class DeleteSegmentsDueToLogStartOffsetBreachTest extends TieredStorageTestHarness { + + @Override + public int brokerCount() { + return 2; + } + + @Override + protected void writeTestSpecifications(TieredStorageTestBuilder builder) { + final Integer broker0 = 0; + final Integer broker1 = 1; + final String topicA = "topicA"; + final Integer p0 = 0; + final Integer partitionCount = 1; + final Integer replicationFactor = 2; + final Integer maxBatchCountPerSegment = 2; + final Map> replicaAssignment = mkMap(mkEntry(p0, Arrays.asList(broker0, broker1))); + final boolean enableRemoteLogStorage = true; + final int beginEpoch = 0; + final long startOffset = 3; + final long beforeOffset = 3L; + final long beforeOffset1 = 7L; + + // Create topicA with 1 partition and 2 RF + builder.createTopic(topicA, partitionCount, replicationFactor, maxBatchCountPerSegment, replicaAssignment, + enableRemoteLogStorage) + // produce events to partition 0 and expect 2 segments to be offloaded + .expectSegmentToBeOffloaded(broker0, topicA, p0, 0, new KeyValueSpec("k0", "v0"), + new KeyValueSpec("k1", "v1")) + .expectSegmentToBeOffloaded(broker0, topicA, p0, 2, new KeyValueSpec("k2", "v2"), + new KeyValueSpec("k3", "v3")) + .expectEarliestLocalOffsetInLogDirectory(topicA, p0, 4L) + .produce(topicA, p0, new KeyValueSpec("k0", "v0"), new KeyValueSpec("k1", "v1"), + new KeyValueSpec("k2", "v2"), new KeyValueSpec("k3", "v3"), new KeyValueSpec("k4", "v4")) + // Use DELETE_RECORDS API to delete the records upto offset 3 and expect one remote segment to be deleted + .expectDeletionInRemoteStorage(broker0, topicA, p0, DELETE_SEGMENT, 1) + .deleteRecords(topicA, p0, beforeOffset) + // expect that the leader epoch checkpoint is updated + // Comment out this line if it's FLAKY since the leader-epoch is not deterministic in ZK mode. 
+ .expectLeaderEpochCheckpoint(broker0, topicA, p0, beginEpoch, startOffset) + // consume from the offset-3 of the topic to read data from local and remote storage + .expectFetchFromTieredStorage(broker0, topicA, p0, 1) + .consume(topicA, p0, 3L, 2, 1) + + // switch leader to change the leader-epoch from 0 to 1 + .expectLeader(topicA, p0, broker1, true) + // produce some more messages and move the log-start-offset such that earliest-epoch changes from 0 to 1 + .expectSegmentToBeOffloaded(broker1, topicA, p0, 4, new KeyValueSpec("k4", "v4"), + new KeyValueSpec("k5", "v5")) + .expectSegmentToBeOffloaded(broker1, topicA, p0, 6, new KeyValueSpec("k6", "v6"), + new KeyValueSpec("k7", "v7")) + .expectEarliestLocalOffsetInLogDirectory(topicA, p0, 8L) + .produce(topicA, p0, new KeyValueSpec("k5", "v5"), new KeyValueSpec("k6", "v6"), + new KeyValueSpec("k7", "v7"), new KeyValueSpec("k8", "v8"), new KeyValueSpec("k9", "v9")) + // Use DELETE_RECORDS API to delete the records upto offset 7 and expect 2 remote segments to be deleted + .expectDeletionInRemoteStorage(broker1, topicA, p0, DELETE_SEGMENT, 2) + .deleteRecords(topicA, p0, beforeOffset1) + // consume from the topic with fetch-offset 7 to read data from local and remote storage + .expectFetchFromTieredStorage(broker1, topicA, p0, 1) + .consume(topicA, p0, 7L, 3, 1); + } +} diff --git a/tests/kafkatest/services/kafka/kafka.py b/tests/kafkatest/services/kafka/kafka.py index e67bde8571ce2..ab48d1091d73b 100644 --- a/tests/kafkatest/services/kafka/kafka.py +++ b/tests/kafkatest/services/kafka/kafka.py @@ -1777,8 +1777,7 @@ def get_offset_shell(self, time=None, topic=None, partitions=None, topic_partiti node = self.nodes[0] cmd = fix_opts_for_new_jvm(node) - cmd += self.path.script("kafka-run-class.sh", node) - cmd += " kafka.tools.GetOffsetShell" + cmd += self.path.script("kafka-get-offsets.sh", node) cmd += " --bootstrap-server %s" % self.bootstrap_servers(self.security_protocol) if time: diff --git a/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java b/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java new file mode 100644 index 0000000000000..99551ca0545a4 --- /dev/null +++ b/tools/src/main/java/org/apache/kafka/tools/GetOffsetShell.java @@ -0,0 +1,396 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.tools; + +import joptsimple.OptionException; +import joptsimple.OptionSpec; +import joptsimple.OptionSpecBuilder; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo; +import org.apache.kafka.clients.admin.ListTopicsOptions; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.requests.ListOffsetsRequest; +import org.apache.kafka.common.requests.ListOffsetsResponse; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.util.CommandDefaultOptions; +import org.apache.kafka.server.util.CommandLineUtils; +import org.apache.kafka.server.util.PartitionFilter; +import org.apache.kafka.server.util.PartitionFilter.PartitionRangeFilter; +import org.apache.kafka.server.util.PartitionFilter.PartitionsSetFilter; +import org.apache.kafka.server.util.PartitionFilter.UniquePartitionFilter; +import org.apache.kafka.server.util.TopicFilter.IncludeList; +import org.apache.kafka.server.util.TopicPartitionFilter; +import org.apache.kafka.server.util.TopicPartitionFilter.CompositeTopicPartitionFilter; +import org.apache.kafka.server.util.TopicPartitionFilter.TopicFilterAndPartitionFilter; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ExecutionException; +import java.util.function.IntFunction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class GetOffsetShell { + private static final Pattern TOPIC_PARTITION_PATTERN = Pattern.compile("([^:,]*)(?::(?:([0-9]*)|(?:([0-9]*)-([0-9]*))))?"); + + public static void main(String... args) { + Exit.exit(mainNoExit(args)); + } + + static int mainNoExit(String... args) { + try { + execute(args); + return 0; + } catch (TerseException e) { + System.err.println("Error occurred: " + e.getMessage()); + return 1; + } catch (Throwable e) { + System.err.println("Error occurred: " + e.getMessage()); + System.err.println(Utils.stackTrace(e)); + return 1; + } + } + + static void execute(String... 
args) throws IOException, ExecutionException, InterruptedException, TerseException { + GetOffsetShell getOffsetShell = new GetOffsetShell(); + + GetOffsetShellOptions options = new GetOffsetShellOptions(args); + + Map partitionOffsets = getOffsetShell.fetchOffsets(options); + + for (Map.Entry entry : partitionOffsets.entrySet()) { + TopicPartition topic = entry.getKey(); + + System.out.println(String.join(":", new String[]{topic.topic(), String.valueOf(topic.partition()), entry.getValue().toString()})); + } + } + + private static class GetOffsetShellOptions extends CommandDefaultOptions { + private final OptionSpec brokerListOpt; + private final OptionSpec bootstrapServerOpt; + private final OptionSpec topicPartitionsOpt; + private final OptionSpec topicOpt; + private final OptionSpec partitionsOpt; + private final OptionSpec timeOpt; + private final OptionSpec commandConfigOpt; + private final OptionSpec effectiveBrokerListOpt; + private final OptionSpecBuilder excludeInternalTopicsOpt; + + public GetOffsetShellOptions(String[] args) throws TerseException { + super(args); + + brokerListOpt = parser.accepts("broker-list", "DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap-server is specified. The server(s) to connect to in the form HOST1:PORT1,HOST2:PORT2.") + .withRequiredArg() + .describedAs("HOST1:PORT1,...,HOST3:PORT3") + .ofType(String.class); + bootstrapServerOpt = parser.accepts("bootstrap-server", "REQUIRED. The server(s) to connect to in the form HOST1:PORT1,HOST2:PORT2.") + .requiredUnless("broker-list") + .withRequiredArg() + .describedAs("HOST1:PORT1,...,HOST3:PORT3") + .ofType(String.class); + topicPartitionsOpt = parser.accepts("topic-partitions", "Comma separated list of topic-partition patterns to get the offsets for, with the format of '" + TOPIC_PARTITION_PATTERN + "'." + + " The first group is an optional regex for the topic name, if omitted, it matches any topic name." + + " The section after ':' describes a 'partition' pattern, which can be: a number, a range in the format of 'NUMBER-NUMBER' (lower inclusive, upper exclusive), an inclusive lower bound in the format of 'NUMBER-', an exclusive upper bound in the format of '-NUMBER' or may be omitted to accept all partitions.") + .withRequiredArg() + .describedAs("topic1:1,topic2:0-3,topic3,topic4:5-,topic5:-3") + .ofType(String.class); + topicOpt = parser.accepts("topic", "The topic to get the offsets for. It also accepts a regular expression. If not present, all authorized topics are queried. Cannot be used if --topic-partitions is present.") + .withRequiredArg() + .describedAs("topic") + .ofType(String.class); + partitionsOpt = parser.accepts("partitions", "Comma separated list of partition ids to get the offsets for. If not present, all partitions of the authorized topics are queried. Cannot be used if --topic-partitions is present.") + .withRequiredArg() + .describedAs("partition ids") + .ofType(String.class); + timeOpt = parser.accepts("time", "timestamp of the offsets before that. 
[Note: No offset is returned, if the timestamp greater than recently committed record timestamp is given.]") + .withRequiredArg() + .describedAs(" / -1 or latest / -2 or earliest / -3 or max-timestamp") + .ofType(String.class) + .defaultsTo("latest"); + commandConfigOpt = parser.accepts("command-config", "Property file containing configs to be passed to Admin Client.") + .withRequiredArg() + .describedAs("config file") + .ofType(String.class); + excludeInternalTopicsOpt = parser.accepts("exclude-internal-topics", "By default, internal topics are included. If specified, internal topics are excluded."); + + if (args.length == 0) { + CommandLineUtils.printUsageAndExit(parser, "An interactive shell for getting topic-partition offsets."); + } + + try { + options = parser.parse(args); + } catch (OptionException e) { + throw new TerseException(e.getMessage()); + } + + if (options.has(bootstrapServerOpt)) { + effectiveBrokerListOpt = bootstrapServerOpt; + } else { + effectiveBrokerListOpt = brokerListOpt; + } + + CommandLineUtils.checkRequiredArgs(parser, options, effectiveBrokerListOpt); + + String brokerList = options.valueOf(effectiveBrokerListOpt); + + try { + ToolsUtils.validateBootstrapServer(brokerList); + } catch (IllegalArgumentException e) { + CommandLineUtils.printUsageAndExit(parser, e.getMessage()); + } + } + + public boolean hasTopicPartitionsOpt() { + return options.has(topicPartitionsOpt); + } + + public String topicPartitionsOpt() { + return options.valueOf(topicPartitionsOpt); + } + + public boolean hasTopicOpt() { + return options.has(topicOpt); + } + + public String topicOpt() { + return options.valueOf(topicOpt); + } + + public boolean hasPartitionsOpt() { + return options.has(partitionsOpt); + } + + public String partitionsOpt() { + return options.valueOf(partitionsOpt); + } + + public String timeOpt() { + return options.valueOf(timeOpt); + } + + public boolean hasCommandConfigOpt() { + return options.has(commandConfigOpt); + } + + public String commandConfigOpt() { + return options.valueOf(commandConfigOpt); + } + + public String effectiveBrokerListOpt() { + return options.valueOf(effectiveBrokerListOpt); + } + + public boolean hasExcludeInternalTopicsOpt() { + return options.has(excludeInternalTopicsOpt); + } + } + + public Map fetchOffsets(GetOffsetShellOptions options) throws IOException, ExecutionException, InterruptedException, TerseException { + String clientId = "GetOffsetShell"; + String brokerList = options.effectiveBrokerListOpt(); + + if (options.hasTopicPartitionsOpt() && (options.hasTopicOpt() || options.hasPartitionsOpt())) { + throw new TerseException("--topic-partitions cannot be used with --topic or --partitions"); + } + + boolean excludeInternalTopics = options.hasExcludeInternalTopicsOpt(); + OffsetSpec offsetSpec = parseOffsetSpec(options.timeOpt()); + + TopicPartitionFilter topicPartitionFilter; + + if (options.hasTopicPartitionsOpt()) { + topicPartitionFilter = createTopicPartitionFilterWithPatternList(options.topicPartitionsOpt()); + } else { + topicPartitionFilter = createTopicPartitionFilterWithTopicAndPartitionPattern(options.topicOpt(), options.partitionsOpt()); + } + + Properties config = options.hasCommandConfigOpt() ? 
Utils.loadProps(options.commandConfigOpt()) : new Properties(); + config.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList); + config.setProperty(AdminClientConfig.CLIENT_ID_CONFIG, clientId); + + try (Admin adminClient = Admin.create(config)) { + List partitionInfos = listPartitionInfos(adminClient, topicPartitionFilter, excludeInternalTopics); + + if (partitionInfos.isEmpty()) { + throw new TerseException("Could not match any topic-partitions with the specified filters"); + } + + Map timestampsToSearch = partitionInfos.stream().collect(Collectors.toMap(tp -> tp, tp -> offsetSpec)); + + ListOffsetsResult listOffsetsResult = adminClient.listOffsets(timestampsToSearch); + + TreeMap partitionOffsets = new TreeMap<>(Comparator.comparing(TopicPartition::toString)); + + for (TopicPartition partition : partitionInfos) { + ListOffsetsResultInfo partitionInfo; + + try { + partitionInfo = listOffsetsResult.partitionResult(partition).get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof KafkaException) { + System.err.println("Skip getting offsets for topic-partition " + partition.toString() + " due to error: " + e.getMessage()); + } else { + throw e; + } + + continue; + } + + if (partitionInfo.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) { + partitionOffsets.put(partition, partitionInfo.offset()); + } + } + + return partitionOffsets; + } + } + + private OffsetSpec parseOffsetSpec(String listOffsetsTimestamp) throws TerseException { + switch (listOffsetsTimestamp) { + case "earliest": + return OffsetSpec.earliest(); + case "latest": + return OffsetSpec.latest(); + case "max-timestamp": + return OffsetSpec.maxTimestamp(); + default: + long timestamp; + + try { + timestamp = Long.parseLong(listOffsetsTimestamp); + } catch (NumberFormatException e) { + throw new TerseException("Malformed time argument " + listOffsetsTimestamp + ". " + + "Please use -1 or latest / -2 or earliest / -3 or max-timestamp, or a specified long format timestamp"); + } + + if (timestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP) { + return OffsetSpec.earliest(); + } else if (timestamp == ListOffsetsRequest.LATEST_TIMESTAMP) { + return OffsetSpec.latest(); + } else if (timestamp == ListOffsetsRequest.MAX_TIMESTAMP) { + return OffsetSpec.maxTimestamp(); + } else { + return OffsetSpec.forTimestamp(timestamp); + } + } + } + + /** + * Creates a topic-partition filter based on a list of patterns. + * Expected format: + * List: TopicPartitionPattern(, TopicPartitionPattern)* + * TopicPartitionPattern: TopicPattern(:PartitionPattern)? | :PartitionPattern + * TopicPattern: REGEX + * PartitionPattern: NUMBER | NUMBER-(NUMBER)? | -NUMBER + */ + public TopicPartitionFilter createTopicPartitionFilterWithPatternList(String topicPartitions) { + List ruleSpecs = Arrays.asList(topicPartitions.split(",")); + List rules = ruleSpecs.stream().map(ruleSpec -> { + try { + return parseRuleSpec(ruleSpec); + } catch (TerseException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + + return new CompositeTopicPartitionFilter(rules); + } + + /** + * Creates a topic-partition filter based on a topic pattern and a set of partition ids. + */ + public TopicPartitionFilter createTopicPartitionFilterWithTopicAndPartitionPattern(String topicOpt, String partitionIds) throws TerseException { + return new TopicFilterAndPartitionFilter( + new IncludeList(topicOpt != null ? 
topicOpt : ".*"), + new PartitionsSetFilter(createPartitionSet(partitionIds)) + ); + } + + private Set createPartitionSet(String partitionsString) throws TerseException { + Set partitions; + + if (partitionsString == null || partitionsString.isEmpty()) { + partitions = Collections.emptySet(); + } else { + try { + partitions = Arrays.stream(partitionsString.split(",")).map(Integer::parseInt).collect(Collectors.toSet()); + } catch (NumberFormatException e) { + throw new TerseException("--partitions expects a comma separated list of numeric " + + "partition ids, but received: " + partitionsString); + } + } + + return partitions; + } + + /** + * Return the partition infos. Filter them with topicPartitionFilter. + */ + private List listPartitionInfos( + Admin client, + TopicPartitionFilter topicPartitionFilter, + boolean excludeInternalTopics + ) throws ExecutionException, InterruptedException { + ListTopicsOptions listTopicsOptions = new ListTopicsOptions().listInternal(!excludeInternalTopics); + Set topics = client.listTopics(listTopicsOptions).names().get(); + Set filteredTopics = topics.stream().filter(topicPartitionFilter::isTopicAllowed).collect(Collectors.toSet()); + + return client.describeTopics(filteredTopics).allTopicNames().get().entrySet().stream().flatMap( + topic -> topic.getValue().partitions().stream().map( + tp -> new TopicPartition(topic.getKey(), tp.partition()) + ).filter(topicPartitionFilter::isTopicPartitionAllowed) + ).collect(Collectors.toList()); + } + + private TopicPartitionFilter parseRuleSpec(String ruleSpec) throws TerseException, RuntimeException { + Matcher matcher = TOPIC_PARTITION_PATTERN.matcher(ruleSpec); + + if (!matcher.matches()) + throw new TerseException("Invalid rule specification: " + ruleSpec); + + IntFunction group = (int g) -> (matcher.group(g) != null && !matcher.group(g).isEmpty()) ? matcher.group(g) : null; + + IncludeList topicFilter = group.apply(1) != null ? new IncludeList(group.apply(1)) : new IncludeList(".*"); + + PartitionFilter partitionFilter; + + if (group.apply(2) != null) { + partitionFilter = new UniquePartitionFilter(Integer.parseInt(group.apply(2))); + } else { + int lowerRange = group.apply(3) != null ? Integer.parseInt(group.apply(3)) : 0; + int upperRange = group.apply(4) != null ? 
Integer.parseInt(group.apply(4)) : Integer.MAX_VALUE; + + partitionFilter = new PartitionRangeFilter(lowerRange, upperRange); + } + + return new TopicPartitionFilter.TopicFilterAndPartitionFilter(topicFilter, partitionFilter); + } +} diff --git a/tools/src/main/java/org/apache/kafka/tools/ReplicaVerificationTool.java b/tools/src/main/java/org/apache/kafka/tools/ReplicaVerificationTool.java index 446c5fd67bf9d..c3e1cc1f7a080 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ReplicaVerificationTool.java +++ b/tools/src/main/java/org/apache/kafka/tools/ReplicaVerificationTool.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.tools; -import joptsimple.OptionParser; import joptsimple.OptionSpec; import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientRequest; @@ -62,7 +61,6 @@ import java.net.SocketTimeoutException; import java.text.SimpleDateFormat; -import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; @@ -312,33 +310,19 @@ private static class ReplicaVerificationToolOptions extends CommandDefaultOption } CommandLineUtils.checkRequiredArgs(parser, options, brokerListOpt); CommandLineUtils.checkInvalidArgs(parser, options, topicsIncludeOpt, topicWhiteListOpt); + } String brokerHostsAndPorts() { String brokerList = options.valueOf(brokerListOpt); - validateBrokerList(parser, brokerList); - return brokerList; - } - void validateBrokerList(OptionParser parser, String brokerList) { - if (parser == null || brokerList == null) { - throw new RuntimeException("No option parser or broker list found"); - } - if (brokerList.isEmpty()) { - CommandLineUtils.printUsageAndExit(parser, "Empty broker list option"); + try { + ToolsUtils.validateBootstrapServer(brokerList); + } catch (IllegalArgumentException e) { + CommandLineUtils.printUsageAndExit(parser, e.getMessage()); } - String[] hostPorts; - if (brokerList.contains(",")) hostPorts = brokerList.split(","); - else hostPorts = new String[]{brokerList}; - - String[] validHostPort = Arrays.stream(hostPorts) - .filter(hostPortData -> Utils.getPort(hostPortData) != null) - .toArray(String[]::new); - - if (validHostPort.length == 0 || validHostPort.length != hostPorts.length) { - CommandLineUtils.printUsageAndExit(parser, "Invalid broker list option"); - } + return brokerList; } TopicFilter.IncludeList topicsIncludeFilter() { diff --git a/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java b/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java index 1a391f33d673a..794f1022293c2 100644 --- a/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java +++ b/tools/src/main/java/org/apache/kafka/tools/ToolsUtils.java @@ -18,8 +18,10 @@ import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.utils.Utils; import java.io.PrintStream; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -99,4 +101,26 @@ public static void prettyPrintTable( printRow(columnLengths, headers, out); rows.forEach(row -> printRow(columnLengths, row, out)); } + + public static void validateBootstrapServer(String hostPort) throws IllegalArgumentException { + if (hostPort == null || hostPort.trim().isEmpty()) { + throw new IllegalArgumentException("Error while validating the bootstrap address\n"); + } + + String[] hostPorts; + + if (hostPort.contains(",")) { + hostPorts = hostPort.split(","); + } else { + hostPorts = new String[] {hostPort}; + } + + String[] validHostPort = Arrays.stream(hostPorts) 
+ .filter(hostPortData -> Utils.getPort(hostPortData) != null) + .toArray(String[]::new); + + if (validHostPort.length == 0 || validHostPort.length != hostPorts.length) { + throw new IllegalArgumentException("Please provide valid host:port like host1:9091,host2:9092\n"); + } + } } diff --git a/tools/src/test/java/org/apache/kafka/tools/FeatureCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/FeatureCommandTest.java index d0511d9cb1831..2391aeb685482 100644 --- a/tools/src/test/java/org/apache/kafka/tools/FeatureCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/FeatureCommandTest.java @@ -68,7 +68,7 @@ public void testDescribeWithKRaft() { ); // Change expected message to reflect latest MetadataVersion (SupportedMaxVersion increases when adding a new version) assertEquals("Feature: metadata.version\tSupportedMinVersion: 3.0-IV1\t" + - "SupportedMaxVersion: 3.6-IV2\tFinalizedVersionLevel: 3.3-IV1\t", outputWithoutEpoch(commandOutput)); + "SupportedMaxVersion: 3.7-IV0\tFinalizedVersionLevel: 3.3-IV1\t", outputWithoutEpoch(commandOutput)); } @ClusterTest(clusterType = Type.ZK, metadataVersion = MetadataVersion.IBP_3_3_IV1) @@ -127,7 +127,7 @@ public void testDowngradeMetadataVersionWithKRaft() { ); // Change expected message to reflect possible MetadataVersion range 1-N (N increases when adding a new version) assertEquals("Could not disable metadata.version. Invalid update version 0 for feature " + - "metadata.version. Local controller 3000 only supports versions 1-14", commandOutput); + "metadata.version. Local controller 3000 only supports versions 1-15", commandOutput); commandOutput = ToolsTestUtils.captureStandardOut(() -> assertEquals(1, FeatureCommand.mainNoExit("--bootstrap-server", cluster.bootstrapServers(), diff --git a/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellParsingTest.java b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellParsingTest.java new file mode 100644 index 0000000000000..9980a471c37da --- /dev/null +++ b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellParsingTest.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
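ToolsUtils.validateBootstrapServer above now carries the host:port check that ReplicaVerificationTool previously did inline, so GetOffsetShell and ReplicaVerificationTool share one validation path. A small illustration of the intended accept/reject behaviour, with made-up addresses:

    import org.apache.kafka.tools.ToolsUtils;

    // Illustrative values only: exercises the shared bootstrap-server validator.
    public final class ValidateBootstrapServerExample {
        public static void main(String[] args) {
            // Accepted: every entry resolves to a host with a numeric port.
            ToolsUtils.validateBootstrapServer("host1:9091,host2:9092");

            // Rejected: "host2" carries no port, so the filtered list is shorter than the input.
            try {
                ToolsUtils.validateBootstrapServer("host1:9091,host2");
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }

            // Rejected: a blank address fails the initial null/empty check.
            try {
                ToolsUtils.validateBootstrapServer("   ");
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
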
+ */ + +package org.apache.kafka.tools; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.server.util.TopicPartitionFilter; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class GetOffsetShellParsingTest { + GetOffsetShell getOffsetShell = new GetOffsetShell(); + + @Test + public void testTopicPartitionFilterForTopicName() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList("test"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertFalse(topicPartitionFilter.isTopicAllowed("test1")); + assertFalse(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + } + + @Test + public void testTopicPartitionFilterForInternalTopicName() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList("__consumer_offsets"); + + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + assertFalse(topicPartitionFilter.isTopicAllowed("test1")); + assertFalse(topicPartitionFilter.isTopicAllowed("test2")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 1))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 0))); + } + + + @Test + public void testTopicPartitionFilterForTopicNameList() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList("test,test1,__consumer_offsets"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + assertFalse(topicPartitionFilter.isTopicAllowed("test2")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 0))); + } + + + @Test + public void testTopicPartitionFilterForRegex() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList("test.*"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("test2")); + assertFalse(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 1))); + 
assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + } + + @Test + public void testTopicPartitionFilterForPartitionIndexSpec() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":0"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("test2")); + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 1))); + } + + @Test + public void testTopicPartitionFilterForPartitionRangeSpec() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":1-3"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + assertTrue(topicPartitionFilter.isTopicAllowed("test2")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 2))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 2))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 3))); + } + + @Test + public void testTopicPartitionFilterForPartitionLowerBoundSpec() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":1-"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("test2")); + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 2))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 2))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + } + + @Test + public void testTopicPartitionFilterForPartitionUpperBoundSpec() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":-3"); + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("test2")); + assertTrue(topicPartitionFilter.isTopicAllowed("test3")); + 
assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test2", 2))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 2))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test3", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 3))); + } + + @Test + public void testTopicPartitionFilterComplex() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList("test.*:0,__consumer_offsets:1-2,.*:3"); + + assertTrue(topicPartitionFilter.isTopicAllowed("test")); + assertTrue(topicPartitionFilter.isTopicAllowed("test1")); + assertTrue(topicPartitionFilter.isTopicAllowed("custom")); + assertTrue(topicPartitionFilter.isTopicAllowed("__consumer_offsets")); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test1", 1))); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("custom", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("custom", 0))); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("__consumer_offsets", 2))); + } + + @Test + public void testPartitionFilterForSingleIndex() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":1"); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 2))); + } + + @Test + public void testPartitionFilterForRange() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":1-3"); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 2))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 4))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 5))); + } + + @Test + public void testPartitionFilterForLowerBound() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":3-"); + + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + 
assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 2))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 3))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 4))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 5))); + } + + @Test + public void testPartitionFilterForUpperBound() { + TopicPartitionFilter topicPartitionFilter = getOffsetShell.createTopicPartitionFilterWithPatternList(":-3"); + + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 0))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 1))); + assertTrue(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 2))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 3))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 4))); + assertFalse(topicPartitionFilter.isTopicPartitionAllowed(getTopicPartition("test", 5))); + } + + @Test + public void testPartitionsSetFilter() throws TerseException { + TopicPartitionFilter partitionsSetFilter = getOffsetShell.createTopicPartitionFilterWithTopicAndPartitionPattern("topic", "1,3,5"); + + assertFalse(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 0))); + assertFalse(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 2))); + assertFalse(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 4))); + + assertFalse(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic1", 1))); + assertFalse(partitionsSetFilter.isTopicAllowed("topic1")); + + assertTrue(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 1))); + assertTrue(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 3))); + assertTrue(partitionsSetFilter.isTopicPartitionAllowed(getTopicPartition("topic", 5))); + assertTrue(partitionsSetFilter.isTopicAllowed("topic")); + } + + @Test + public void testInvalidTimeValue() { + assertThrows(TerseException.class, () -> GetOffsetShell.execute("--bootstrap-server", "localhost:9092", "--time", "invalid")); + } + + private TopicPartition getTopicPartition(String topic, Integer partition) { + return new TopicPartition(topic, partition); + } +} diff --git a/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java new file mode 100644 index 0000000000000..417bbe711681a --- /dev/null +++ b/tools/src/test/java/org/apache/kafka/tools/GetOffsetShellTest.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.tools;
+
+import kafka.test.ClusterInstance;
+import kafka.test.annotation.ClusterTest;
+import kafka.test.annotation.ClusterTestDefaults;
+import kafka.test.annotation.Type;
+import kafka.test.junit.ClusterTestExtensions;
+import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.clients.admin.Admin;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.Exit;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@ExtendWith(value = ClusterTestExtensions.class)
+@ClusterTestDefaults(clusterType = Type.ZK)
+@Tag("integration")
+public class GetOffsetShellTest {
+    private final int topicCount = 4;
+    private final int offsetTopicPartitionCount = 4;
+    private final ClusterInstance cluster;
+
+    public GetOffsetShellTest(ClusterInstance cluster) {
+        this.cluster = cluster;
+    }
+
+    private String getTopicName(int i) {
+        return "topic" + i;
+    }
+
+    public void setUp() {
+        cluster.config().serverProperties().put("auto.create.topics.enable", false);
+        cluster.config().serverProperties().put("offsets.topic.replication.factor", "1");
+        cluster.config().serverProperties().put("offsets.topic.num.partitions", String.valueOf(offsetTopicPartitionCount));
+
+        try (Admin admin = Admin.create(cluster.config().adminClientProperties())) {
+            List<NewTopic> topics = new ArrayList<>();
+
+            IntStream.range(0, topicCount + 1).forEach(i -> topics.add(new NewTopic(getTopicName(i), i, (short) 1)));
+
+            admin.createTopics(topics);
+        }
+
+        Properties props = new Properties();
+        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, cluster.config().producerProperties().get("bootstrap.servers"));
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+
+        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
+            IntStream.range(0, topicCount + 1)
+                .forEach(i -> IntStream.range(0, i * i)
+                    .forEach(msgCount -> producer.send(
+                        new ProducerRecord<>(getTopicName(i), msgCount % i, null, "val" + msgCount)))
+                );
+        }
+    }
+
+    static class Row {
+        private String name;
+        private int partition;
+        private Long timestamp;
+
+        public Row(String name, int partition, Long timestamp) {
+            this.name = name;
+            this.partition = partition;
+            this.timestamp = timestamp;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o == this) return true;
+
+            if (!(o instanceof Row)) return false;
+
+            Row r = (Row) o;
+
+            return name.equals(r.name) && partition == r.partition && Objects.equals(timestamp, r.timestamp);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(name, partition, timestamp);
+        }
+    }
+
+    @ClusterTest
+    public void testNoFilterOptions() {
+        setUp();
+
+        List<Row> output = executeAndParse();
+
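+        // The tool prints one "<topic>:<partition>:<offset>" line per partition; executeAndParse maps each line
+        // to a Row, e.g. topic2 (two partitions with two records each) yields Row("topic2", 0, 2L) and Row("topic2", 1, 2L).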
+        assertEquals(expectedOffsetsWithInternal(), output);
+    }
+
+    @ClusterTest
+    public void testInternalExcluded() {
+        setUp();
+
+        List<Row> output = executeAndParse("--exclude-internal-topics");
+
+        assertEquals(expectedTestTopicOffsets(), output);
+    }
+
+    @ClusterTest
+    public void testTopicNameArg() {
+        setUp();
+
+        IntStream.range(1, topicCount + 1).forEach(i -> {
+            List<Row> offsets = executeAndParse("--topic", getTopicName(i));
+
+            assertEquals(expectedOffsetsForTopic(i), offsets, () -> "Offset output did not match for " + getTopicName(i));
+        });
+    }
+
+    @ClusterTest
+    public void testTopicPatternArg() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--topic", "topic.*");
+
+        assertEquals(expectedTestTopicOffsets(), offsets);
+    }
+
+    @ClusterTest
+    public void testPartitionsArg() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--partitions", "0,1");
+
+        assertEquals(expectedOffsetsWithInternal().stream().filter(r -> r.partition <= 1).collect(Collectors.toList()), offsets);
+    }
+
+    @ClusterTest
+    public void testTopicPatternArgWithPartitionsArg() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--topic", "topic.*", "--partitions", "0,1");
+
+        assertEquals(expectedTestTopicOffsets().stream().filter(r -> r.partition <= 1).collect(Collectors.toList()), offsets);
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsArg() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3");
+        List<Row> expected = Arrays.asList(
+                new Row("__consumer_offsets", 3, 0L),
+                new Row("topic1", 0, 1L),
+                new Row("topic2", 1, 2L),
+                new Row("topic3", 2, 3L),
+                new Row("topic4", 2, 4L)
+        );
+
+        assertEquals(expected, offsets);
+    }
+
+    @ClusterTest
+    public void testGetLatestOffsets() {
+        setUp();
+
+        for (String time : new String[] {"-1", "latest"}) {
+            List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
+            List<Row> expected = Arrays.asList(
+                    new Row("topic1", 0, 1L),
+                    new Row("topic2", 0, 2L),
+                    new Row("topic3", 0, 3L),
+                    new Row("topic4", 0, 4L)
+            );
+
+            assertEquals(expected, offsets);
+        }
+    }
+
+    @ClusterTest
+    public void testGetEarliestOffsets() {
+        setUp();
+
+        for (String time : new String[] {"-2", "earliest"}) {
+            List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
+            List<Row> expected = Arrays.asList(
+                    new Row("topic1", 0, 0L),
+                    new Row("topic2", 0, 0L),
+                    new Row("topic3", 0, 0L),
+                    new Row("topic4", 0, 0L)
+            );
+
+            assertEquals(expected, offsets);
+        }
+    }
+
+    @ClusterTest
+    public void testGetOffsetsByMaxTimestamp() {
+        setUp();
+
+        for (String time : new String[] {"-3", "max-timestamp"}) {
+            List<Row> offsets = executeAndParse("--topic-partitions", "topic.*", "--time", time);
+
+            offsets.forEach(
+                    row -> assertTrue(row.timestamp >= 0 && row.timestamp <= Integer.parseInt(row.name.replace("topic", "")))
+            );
+        }
+    }
+
+    @ClusterTest
+    public void testGetOffsetsByTimestamp() {
+        setUp();
+
+        String time = String.valueOf(System.currentTimeMillis() / 2);
+
+        List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
+        List<Row> expected = Arrays.asList(
+                new Row("topic1", 0, 0L),
+                new Row("topic2", 0, 0L),
+                new Row("topic3", 0, 0L),
+                new Row("topic4", 0, 0L)
+        );
+
+        assertEquals(expected, offsets);
+    }
+
+    @ClusterTest
+    public void testNoOffsetIfTimestampGreaterThanLatestRecord() {
+        setUp();
+
+        String time = String.valueOf(System.currentTimeMillis() * 2);
+
+        List<Row> offsets = executeAndParse("--topic-partitions", "topic.*", "--time", time);
+
+        assertEquals(new ArrayList<Row>(), offsets);
+    }
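+
+    // The "__.*" clause in the next test matches __consumer_offsets, but --exclude-internal-topics drops
+    // internal topics again, so only the user topics appear in the expected rows.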
+    @ClusterTest
+    public void testTopicPartitionsArgWithInternalExcluded() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3", "--exclude-internal-topics");
+        List<Row> expected = Arrays.asList(
+                new Row("topic1", 0, 1L),
+                new Row("topic2", 1, 2L),
+                new Row("topic3", 2, 3L),
+                new Row("topic4", 2, 4L)
+        );
+
+        assertEquals(expected, offsets);
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsArgWithInternalIncluded() {
+        setUp();
+
+        List<Row> offsets = executeAndParse("--topic-partitions", "__.*:0");
+
+        assertEquals(Arrays.asList(new Row("__consumer_offsets", 0, 0L)), offsets);
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsNotFoundForNonExistentTopic() {
+        assertExitCodeIsOne("--topic", "some_nonexistent_topic");
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsNotFoundForExcludedInternalTopic() {
+        assertExitCodeIsOne("--topic", "some_nonexistent_topic:*");
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsNotFoundForNonMatchingTopicPartitionPattern() {
+        assertExitCodeIsOne("--topic-partitions", "__consumer_offsets", "--exclude-internal-topics");
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsFlagWithTopicFlagCauseExit() {
+        assertExitCodeIsOne("--topic-partitions", "__consumer_offsets", "--topic", "topic1");
+    }
+
+    @ClusterTest
+    public void testTopicPartitionsFlagWithPartitionsFlagCauseExit() {
+        assertExitCodeIsOne("--topic-partitions", "__consumer_offsets", "--partitions", "0");
+    }
+
+    private void assertExitCodeIsOne(String... args) {
+        final int[] exitStatus = new int[1];
+
+        Exit.setExitProcedure((statusCode, message) -> {
+            exitStatus[0] = statusCode;
+
+            throw new RuntimeException();
+        });
+
+        try {
+            GetOffsetShell.main(addBootstrapServer(args));
+        } catch (RuntimeException ignored) {
+
+        } finally {
+            Exit.resetExitProcedure();
+        }
+
+        assertEquals(1, exitStatus[0]);
+    }
+
+    private List<Row> expectedOffsetsWithInternal() {
+        List<Row> consOffsets = IntStream.range(0, offsetTopicPartitionCount + 1)
+                .mapToObj(i -> new Row("__consumer_offsets", i, 0L))
+                .collect(Collectors.toList());
+
+        return Stream.concat(consOffsets.stream(), expectedTestTopicOffsets().stream()).collect(Collectors.toList());
+    }
+
+    private List<Row> expectedTestTopicOffsets() {
+        List<Row> offsets = new ArrayList<>(topicCount + 1);
+
+        for (int i = 0; i < topicCount + 1; i++) {
+            offsets.addAll(expectedOffsetsForTopic(i));
+        }
+
+        return offsets;
+    }
+
+    private List<Row> expectedOffsetsForTopic(int i) {
+        String name = getTopicName(i);
+
+        return IntStream.range(0, i).mapToObj(p -> new Row(name, p, (long) i)).collect(Collectors.toList());
+    }
+
+    private List<Row> executeAndParse(String... args) {
+        String out = ToolsTestUtils.captureStandardOut(() -> GetOffsetShell.mainNoExit(addBootstrapServer(args)));
+
+        return Arrays.stream(out.split(System.lineSeparator()))
+                .map(i -> i.split(":"))
+                .filter(i -> i.length >= 2)
+                .map(line -> new Row(line[0], Integer.parseInt(line[1]), (line.length == 2 || line[2].isEmpty()) ? null : Long.parseLong(line[2])))
+                .collect(Collectors.toList());
+    }
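+
+    // Each test drives the real command line: addBootstrapServer appends --bootstrap-server for the
+    // test cluster to the arguments supplied by the individual test case.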
+
+    private String[] addBootstrapServer(String... args) {
+        ArrayList<String> newArgs = new ArrayList<>(Arrays.asList(args));
+        newArgs.add("--bootstrap-server");
+        newArgs.add(cluster.bootstrapServers());
+
+        return newArgs.toArray(new String[0]);
+    }
+}
diff --git a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandArgsTest.java b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandArgsTest.java
new file mode 100644
index 0000000000000..7b267646d496b
--- /dev/null
+++ b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandArgsTest.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.tools.reassign;
+
+import kafka.admin.ReassignPartitionsCommand;
+import org.apache.kafka.common.utils.Exit;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@Timeout(60)
+public class ReassignPartitionsCommandArgsTest {
+    public static final String MISSING_BOOTSTRAP_SERVER_MSG = "Please specify --bootstrap-server";
+
+    @BeforeEach
+    public void setUp() {
+        Exit.setExitProcedure((statusCode, message) -> {
+            throw new IllegalArgumentException(message);
+        });
+    }
+
+    @AfterEach
+    public void tearDown() {
+        Exit.resetExitProcedure();
+    }
+
+    ///// Test valid argument parsing
+    @Test
+    public void shouldCorrectlyParseValidMinimumGenerateOptions() {
+        String[] args = new String[] {
+            "--bootstrap-server", "localhost:1234",
+            "--generate",
+            "--broker-list", "101,102",
+            "--topics-to-move-json-file", "myfile.json"};
+        ReassignPartitionsCommand.validateAndParseArgs(args);
+    }
+
+    @Test
+    public void shouldCorrectlyParseValidMinimumExecuteOptions() {
+        String[] args = new String[] {
+            "--bootstrap-server", "localhost:1234",
+            "--execute",
+            "--reassignment-json-file", "myfile.json"};
+        ReassignPartitionsCommand.validateAndParseArgs(args);
+    }
+
+    @Test
+    public void shouldCorrectlyParseValidMinimumVerifyOptions() {
+        String[] args = new String[] {
+            "--bootstrap-server", "localhost:1234",
+            "--verify",
+            "--reassignment-json-file", "myfile.json"};
+        ReassignPartitionsCommand.validateAndParseArgs(args);
+    }
+
+    @Test
+    public void shouldAllowThrottleOptionOnExecute() {
+        String[] args = new String[] {
+            "--bootstrap-server", "localhost:1234",
+            "--execute",
+            "--throttle", "100",
+            "--reassignment-json-file", "myfile.json"};
+        ReassignPartitionsCommand.validateAndParseArgs(args);
+    }
+
+    @Test
+    public void shouldUseDefaultsIfEnabled() {
+        String[] args = new String[] {
+            "--bootstrap-server", "localhost:1234",
"--execute", + "--reassignment-json-file", "myfile.json"}; + ReassignPartitionsCommand.ReassignPartitionsCommandOptions opts = ReassignPartitionsCommand.validateAndParseArgs(args); + assertEquals(10000L, opts.options.valueOf(opts.timeoutOpt())); + assertEquals(-1L, opts.options.valueOf(opts.interBrokerThrottleOpt())); + } + + @Test + public void testList() { + String[] args = new String[] { + "--list", + "--bootstrap-server", "localhost:1234"}; + ReassignPartitionsCommand.validateAndParseArgs(args); + } + + @Test + public void testCancelWithPreserveThrottlesOption() { + String[] args = new String[] { + "--cancel", + "--bootstrap-server", "localhost:1234", + "--reassignment-json-file", "myfile.json", + "--preserve-throttles"}; + ReassignPartitionsCommand.validateAndParseArgs(args); + } + + ///// Test handling missing or invalid actions + @Test + public void shouldFailIfNoArgs() { + String[] args = new String[0]; + shouldFailWith(ReassignPartitionsCommand.helpText(), args); + } + + @Test + public void shouldFailIfBlankArg() { + String[] args = new String[] {" "}; + shouldFailWith("Command must include exactly one action", args); + } + + @Test + public void shouldFailIfMultipleActions() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--execute", + "--verify", + "--reassignment-json-file", "myfile.json" + }; + shouldFailWith("Command must include exactly one action", args); + } + + ///// Test --execute + @Test + public void shouldNotAllowExecuteWithTopicsOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--execute", + "--reassignment-json-file", "myfile.json", + "--topics-to-move-json-file", "myfile.json"}; + shouldFailWith("Option \"[topics-to-move-json-file]\" can't be used with action \"[execute]\"", args); + } + + @Test + public void shouldNotAllowExecuteWithBrokerList() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--execute", + "--reassignment-json-file", "myfile.json", + "--broker-list", "101,102" + }; + shouldFailWith("Option \"[broker-list]\" can't be used with action \"[execute]\"", args); + } + + @Test + public void shouldNotAllowExecuteWithoutReassignmentOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--execute"}; + shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args); + } + + @Test + public void testMissingBootstrapServerArgumentForExecute() { + String[] args = new String[] { + "--execute"}; + shouldFailWith(MISSING_BOOTSTRAP_SERVER_MSG, args); + } + + ///// Test --generate + @Test + public void shouldNotAllowGenerateWithoutBrokersAndTopicsOptions() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--generate"}; + shouldFailWith("Missing required argument \"[topics-to-move-json-file]\"", args); + } + + @Test + public void shouldNotAllowGenerateWithoutBrokersOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--topics-to-move-json-file", "myfile.json", + "--generate"}; + shouldFailWith("Missing required argument \"[broker-list]\"", args); + } + + @Test + public void shouldNotAllowGenerateWithoutTopicsOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--broker-list", "101,102", + "--generate"}; + shouldFailWith("Missing required argument \"[topics-to-move-json-file]\"", args); + } + + @Test + public void shouldNotAllowGenerateWithThrottleOption() { + String[] args = new String[] { + "--bootstrap-server", 
"localhost:1234", + "--generate", + "--broker-list", "101,102", + "--throttle", "100", + "--topics-to-move-json-file", "myfile.json"}; + shouldFailWith("Option \"[throttle]\" can't be used with action \"[generate]\"", args); + } + + @Test + public void shouldNotAllowGenerateWithReassignmentOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--generate", + "--broker-list", "101,102", + "--topics-to-move-json-file", "myfile.json", + "--reassignment-json-file", "myfile.json"}; + shouldFailWith("Option \"[reassignment-json-file]\" can't be used with action \"[generate]\"", args); + } + + @Test + public void shouldPrintHelpTextIfHelpArg() { + String[] args = new String[] {"--help"}; + // note, this is not actually a failed case, it's just we share the same `printUsageAndExit` method when wrong arg received + shouldFailWith(ReassignPartitionsCommand.helpText(), args); + } + + ///// Test --verify + @Test + public void shouldNotAllowVerifyWithoutReassignmentOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--verify"}; + shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args); + } + + @Test + public void shouldNotAllowBrokersListWithVerifyOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--verify", + "--broker-list", "100,101", + "--reassignment-json-file", "myfile.json"}; + shouldFailWith("Option \"[broker-list]\" can't be used with action \"[verify]\"", args); + } + + @Test + public void shouldNotAllowThrottleWithVerifyOption() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--verify", + "--throttle", "100", + "--reassignment-json-file", "myfile.json"}; + shouldFailWith("Option \"[throttle]\" can't be used with action \"[verify]\"", args); + } + + @Test + public void shouldNotAllowTopicsOptionWithVerify() { + String[] args = new String[] { + "--bootstrap-server", "localhost:1234", + "--verify", + "--reassignment-json-file", "myfile.json", + "--topics-to-move-json-file", "myfile.json"}; + shouldFailWith("Option \"[topics-to-move-json-file]\" can't be used with action \"[verify]\"", args); + } + + private void shouldFailWith(String msg, String[] args) { + Throwable e = assertThrows(Exception.class, () -> ReassignPartitionsCommand.validateAndParseArgs(args), + () -> "Should have failed with [" + msg + "] but no failure occurred."); + assertTrue(e.getMessage().startsWith(msg), "Expected exception with message:\n[" + msg + "]\nbut was\n[" + e.getMessage() + "]"); + } + + ///// Test --cancel + @Test + public void shouldNotAllowCancelWithoutBootstrapServerOption() { + String[] args = new String[] { + "--cancel"}; + shouldFailWith(MISSING_BOOTSTRAP_SERVER_MSG, args); + } + + @Test + public void shouldNotAllowCancelWithoutReassignmentJsonFile() { + String[] args = new String[] { + "--cancel", + "--bootstrap-server", "localhost:1234", + "--preserve-throttles"}; + shouldFailWith("Missing required argument \"[reassignment-json-file]\"", args); + } +}