[SERIALIZATION] Serialize Broker info in ClusterInfo #1721

Merged
Changes from 7 commits
132 changes: 113 additions & 19 deletions common/src/main/java/org/astraea/common/ByteUtils.java
@@ -26,15 +26,21 @@
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.astraea.common.admin.Broker;
import org.astraea.common.admin.ClusterInfo;
import org.astraea.common.admin.Config;
import org.astraea.common.admin.NodeInfo;
import org.astraea.common.admin.Replica;
import org.astraea.common.admin.Topic;
import org.astraea.common.admin.TopicPartition;
import org.astraea.common.generated.BeanObjectOuterClass;
import org.astraea.common.generated.ClusterInfoOuterClass;
import org.astraea.common.generated.PrimitiveOuterClass;
import org.astraea.common.generated.admin.BrokerOuterClass;
import org.astraea.common.generated.admin.ClusterInfoOuterClass;
import org.astraea.common.generated.admin.NodeInfoOuterClass;
import org.astraea.common.generated.admin.ReplicaOuterClass;
import org.astraea.common.generated.admin.TopicOuterClass;
import org.astraea.common.generated.admin.TopicPartitionOuterClass;
import org.astraea.common.metrics.BeanObject;

public final class ByteUtils {
@@ -185,21 +191,64 @@ public static byte[] toBytes(BeanObject value) {
public static byte[] toBytes(ClusterInfo value) {
return ClusterInfoOuterClass.ClusterInfo.newBuilder()
.setClusterId(value.clusterId())
.addAllNodeInfo(
value.nodes().stream()
.addAllBroker(
value.brokers().stream()
.map(
nodeInfo ->
ClusterInfoOuterClass.ClusterInfo.NodeInfo.newBuilder()
.setId(nodeInfo.id())
.setHost(nodeInfo.host())
.setPort(nodeInfo.port())
broker ->
BrokerOuterClass.Broker.newBuilder()
.setId(broker.id())
.setHost(broker.host())
.setPort(broker.port())
.setIsController(broker.isController())
.putAllConfig(broker.config().raw())
.addAllDatafolder(
broker.dataFolders().stream()
.map(
dataFolder ->
BrokerOuterClass.Broker.Datafolder.newBuilder()
.setPath(dataFolder.path())
.putAllPartitionSizes(
dataFolder.partitionSizes().entrySet().stream()
.collect(
Collectors.toMap(
entry -> entry.getKey().toString(),
Map.Entry::getValue)))
.putAllOrphanPartitionSizes(
dataFolder
.orphanPartitionSizes()
.entrySet()
.stream()
.collect(
Collectors.toMap(
entry -> entry.getKey().toString(),
Map.Entry::getValue)))
.build())
.toList())
.addAllTopicPartitions(
broker.topicPartitions().stream()
.map(
tp ->
TopicPartitionOuterClass.TopicPartition.newBuilder()
.setPartition(tp.partition())
.setTopic(tp.topic())
.build())
.toList())
.addAllTopicPartitionLeaders(
broker.topicPartitionLeaders().stream()
.map(
tp ->
TopicPartitionOuterClass.TopicPartition.newBuilder()
.setPartition(tp.partition())
.setTopic(tp.topic())
.build())
.toList())
.build())
.collect(Collectors.toList()))
.toList())
.addAllTopic(
value.topics().values().stream()
.map(
topicClass ->
ClusterInfoOuterClass.ClusterInfo.Topic.newBuilder()
TopicOuterClass.Topic.newBuilder()
.setName(topicClass.name())
.putAllConfig(topicClass.config().raw())
.setInternal(topicClass.internal())
@@ -208,16 +257,16 @@ public static byte[] toBytes(ClusterInfo value) {
.map(TopicPartition::partition)
.collect(Collectors.toList()))
.build())
.collect(Collectors.toList()))
.toList())
.addAllReplica(
value.replicas().stream()
.map(
replica ->
ClusterInfoOuterClass.ClusterInfo.Replica.newBuilder()
ReplicaOuterClass.Replica.newBuilder()
.setTopic(replica.topic())
.setPartition(replica.partition())
.setNodeInfo(
ClusterInfoOuterClass.ClusterInfo.NodeInfo.newBuilder()
NodeInfoOuterClass.NodeInfo.newBuilder()
.setId(replica.nodeInfo().id())
.setHost(replica.nodeInfo().host())
.setPort(replica.nodeInfo().port())
@@ -231,7 +280,7 @@ public static byte[] toBytes(ClusterInfo value) {
.setIsPreferredLeader(replica.isPreferredLeader())
.setPath(replica.path())
.build())
.collect(Collectors.toList()))
.toList())
.build()
.toByteArray();
}
@@ -326,10 +375,55 @@ public static ClusterInfo readClusterInfo(byte[] bytes) {
var outerClusterInfo = ClusterInfoOuterClass.ClusterInfo.parseFrom(bytes);
return ClusterInfo.of(
outerClusterInfo.getClusterId(),
outerClusterInfo.getNodeInfoList().stream()
outerClusterInfo.getBrokerList().stream()
.map(
nodeInfo -> NodeInfo.of(nodeInfo.getId(), nodeInfo.getHost(), nodeInfo.getPort()))
.collect(Collectors.toList()),
broker -> {
var host = broker.getHost();
var port = broker.getPort();
var id = broker.getId();
var isController = broker.getIsController();
var config = new Config(broker.getConfigMap());
var dataFolders =
broker.getDatafolderList().stream()
.map(
datafolder -> {
var path = datafolder.getPath();
var partitionSizes =
datafolder.getPartitionSizesMap().entrySet().stream()
.collect(
Collectors.toMap(
entry -> TopicPartition.of(entry.getKey()),
Map.Entry::getValue));
var orphanPartitionSizes =
datafolder.getOrphanPartitionSizesMap().entrySet().stream()
.collect(
Collectors.toMap(
entry -> TopicPartition.of(entry.getKey()),
Map.Entry::getValue));
return new Broker.DataFolder(
path, partitionSizes, orphanPartitionSizes);
})
.toList();
var topicPartitions =
broker.getTopicPartitionsList().stream()
.map(tp -> TopicPartition.of(tp.getTopic(), tp.getPartition()))
.collect(Collectors.toSet());
var topicPartitionLeaders =
broker.getTopicPartitionLeadersList().stream()
.map(tp -> TopicPartition.of(tp.getTopic(), tp.getPartition()))
.collect(Collectors.toSet());
return (NodeInfo)
new Broker(
id,
host,
port,
isController,
config,
dataFolders,
topicPartitions,
topicPartitionLeaders);
})
.toList(),
outerClusterInfo.getTopicList().stream()
.map(
protoTopic ->
@@ -341,7 +435,7 @@ public String name() {

@Override
public Config config() {
return Config.of(protoTopic.getConfigMap());
return new Config(protoTopic.getConfigMap());
}

@Override
@@ -377,7 +471,7 @@ public Set<TopicPartition> topicPartitions() {
.isPreferredLeader(replica.getIsPreferredLeader())
.path(replica.getPath())
.build())
.collect(Collectors.toList()));
.toList());
} catch (InvalidProtocolBufferException ex) {
throw new SerializationException(ex);
}
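For reference, a minimal round-trip sketch of the new serialization path (not part of this PR). It assumes the four-argument ClusterInfo.of(clusterId, brokers, topics, replicas) factory used by readClusterInfo above, passes empty topic and replica lists, and uses made-up placeholder values for the broker:

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.astraea.common.ByteUtils;
import org.astraea.common.admin.Broker;
import org.astraea.common.admin.ClusterInfo;
import org.astraea.common.admin.Config;
import org.astraea.common.admin.NodeInfo;
import org.astraea.common.admin.TopicPartition;

public class ClusterInfoRoundTripSketch {
  public static void main(String[] args) {
    // a broker with one data folder hosting a single partition (placeholder values)
    var tp = TopicPartition.of("demo", 0);
    var folder = new Broker.DataFolder("/tmp/kafka-logs", Map.of(tp, 1024L), Map.of());
    var broker =
        new Broker(
            1001,
            "broker-1",
            9092,
            true,
            new Config(Map.of("num.io.threads", "8")),
            List.of(folder),
            Set.of(tp),
            Set.of(tp));
    // assumed factory: ClusterInfo.of(clusterId, brokers, topics, replicas)
    var clusterInfo =
        ClusterInfo.of("demo-cluster", List.<NodeInfo>of(broker), List.of(), List.of());
    // serialize the broker info together with the rest of the cluster info, then read it back
    byte[] bytes = ByteUtils.toBytes(clusterInfo);
    var restored = ByteUtils.readClusterInfo(bytes);
    System.out.println(restored.brokers());
  }
}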
130 changes: 32 additions & 98 deletions common/src/main/java/org/astraea/common/admin/Broker.java
@@ -23,15 +23,27 @@
import java.util.stream.Collectors;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;

public interface Broker extends NodeInfo {
public record Broker(
int id,
String host,
int port,
boolean isController,
// config used by this node
Config config,
// the disk folder used to store data by this node
List<DataFolder> dataFolders,
Set<TopicPartition> topicPartitions,
// partition leaders hosted by this broker
Set<TopicPartition> topicPartitionLeaders)
implements NodeInfo {

static Broker of(
boolean isController,
org.apache.kafka.common.Node nodeInfo,
Map<String, String> configs,
Map<String, DescribeLogDirsResponse.LogDirInfo> dirs,
Collection<org.apache.kafka.clients.admin.TopicDescription> topics) {
var config = Config.of(configs);
var config = new Config(configs);
var partitionsFromTopicDesc =
topics.stream()
.flatMap(
@@ -60,26 +72,9 @@ static Broker of(
tpAndSize -> !partitionsFromTopicDesc.contains(tpAndSize.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

return (DataFolder)
new DataFolder() {

@Override
public String path() {
return path;
}

@Override
public Map<TopicPartition, Long> partitionSizes() {
return partitionSizes;
}

@Override
public Map<TopicPartition, Long> orphanPartitionSizes() {
return orphanPartitionSizes;
}
};
return new DataFolder(path, partitionSizes, orphanPartitionSizes);
})
.collect(Collectors.toList());
.toList();
var topicPartitionLeaders =
topics.stream()
.flatMap(
@@ -88,83 +83,22 @@ public Map<TopicPartition, Long> orphanPartitionSizes() {
.filter(p -> p.leader() != null && p.leader().id() == nodeInfo.id())
.map(p -> TopicPartition.of(topic.name(), p.partition())))
.collect(Collectors.toUnmodifiableSet());
return new Broker() {
@Override
public String host() {
return nodeInfo.host();
}

@Override
public int port() {
return nodeInfo.port();
}

@Override
public int id() {
return nodeInfo.id();
}

@Override
public boolean isController() {
return isController;
}

@Override
public Config config() {
return config;
}

@Override
public List<DataFolder> dataFolders() {
return folders;
}

@Override
public Set<TopicPartition> topicPartitions() {
return partitionsFromTopicDesc;
}

@Override
public Set<TopicPartition> topicPartitionLeaders() {
return topicPartitionLeaders;
}
};
return new Broker(
nodeInfo.id(),
nodeInfo.host(),
nodeInfo.port(),
isController,
config,
folders,
partitionsFromTopicDesc,
topicPartitionLeaders);
}

boolean isController();

/**
* @return config used by this node
*/
Config config();

/**
* @return the disk folder used to store data by this node
*/
List<DataFolder> dataFolders();

Set<TopicPartition> topicPartitions();

/**
* @return partition leaders hosted by this broker
*/
Set<TopicPartition> topicPartitionLeaders();

interface DataFolder {

/**
* @return the path on the local disk
*/
String path();

/**
* @return topic partition hosted by this node and size of files
*/
Map<TopicPartition, Long> partitionSizes();

/**
* @return topic partition located by this node but not traced by cluster
*/
Map<TopicPartition, Long> orphanPartitionSizes();
}
public record DataFolder(
// the path on the local disk
String path,
// topic partition hosted by this node and size of files
Map<TopicPartition, Long> partitionSizes,
// topic partition located by this node but not traced by cluster
Map<TopicPartition, Long> orphanPartitionSizes) {}
}
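Call sites keep compiling because the record components reuse the old interface's accessor names. A small sketch (not part of this diff, placeholder values) of constructing the new records directly:

import java.util.List;
import java.util.Map;
import java.util.Set;
import org.astraea.common.admin.Broker;
import org.astraea.common.admin.Config;
import org.astraea.common.admin.TopicPartition;

public class BrokerRecordSketch {
  public static void main(String[] args) {
    var tp = TopicPartition.of("demo", 0);
    // DataFolder is now a record, so the anonymous-class boilerplate becomes a constructor call
    var folder = new Broker.DataFolder("/data/kafka-0", Map.of(tp, 2048L), Map.of());
    var broker =
        new Broker(
            0,
            "localhost",
            9092,
            false,
            new Config(Map.of()),
            List.of(folder),
            Set.of(tp),
            Set.of());
    // record accessors match the former interface methods
    System.out.println(broker.id() + "@" + broker.host() + ":" + broker.port());
    System.out.println(folder.path() + " -> " + folder.partitionSizes());
  }
}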