diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 75b8009e7c..6a2d0b4ad9 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -57,8 +57,32 @@ jobs: with: submodules: 'true' - - name: Compile - run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\KafkaBridge.sln + - name: Pre compile + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln + + - name: Set up Apache Maven Central + uses: actions/setup-java@v1 + with: # running setup-java again overwrites the settings.xml + java-version: 11 + server-id: ossrh # Value of the distributionManagement/repository/id field of the pom.xml + server-username: MAVEN_USERNAME # env variable for username in deploy + server-password: MAVEN_CENTRAL_TOKEN # env variable for token in deploy + gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} # Value of the GPG private key to import + gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase + + - name: Install local file to be used within Javadoc plugin of generated POM + run: mvn install:install-file -DgroupId=JCOBridge -DartifactId=JCOBridge -Dversion=2.4.3 -Dpackaging=jar -Dfile=./bin/net5.0/JCOBridge.jar + shell: bash + + - name: Create Jars + run: mvn --file ./src/java/kafkabridge/pom.xml package + env: + MAVEN_USERNAME: ${{ secrets.MAVEN_USERNAME }} + MAVEN_CENTRAL_TOKEN: ${{ secrets.MAVEN_CENTRAL_TOKEN }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }} + + - name: Recompile to create nuget packages + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln - name: Clear documentation folder run: Remove-Item .\docs\* -Recurse -Force -Exclude _config.yml @@ -66,7 +90,7 @@ jobs: - name: Build documentation run: | choco install docfx - cd src\Documentation + cd src\net\Documentation docfx - uses: actions/upload-artifact@v2 diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index bed1d7d08d..792d3ed79e 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -25,8 +25,32 @@ jobs: with: submodules: 'true' - - name: Compile - run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\KafkaBridge.sln + - name: Pre compile + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln + + - name: Set up Apache Maven Central + uses: actions/setup-java@v1 + with: # running setup-java again overwrites the settings.xml + java-version: 11 + server-id: ossrh # Value of the distributionManagement/repository/id field of the pom.xml + server-username: MAVEN_USERNAME # env variable for username in deploy + server-password: MAVEN_CENTRAL_TOKEN # env variable for token in deploy + gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} # Value of the GPG private key to import + gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase + + - name: Install local file to be used within Javadoc plugin of generated POM + run: mvn install:install-file -DgroupId=JCOBridge -DartifactId=JCOBridge -Dversion=2.4.3 -Dpackaging=jar -Dfile=./bin/net5.0/JCOBridge.jar + shell: bash + + - name: Create Jars + run: mvn --file ./src/java/kafkabridge/pom.xml package + env: + MAVEN_USERNAME: ${{ secrets.MAVEN_USERNAME }} + MAVEN_CENTRAL_TOKEN: ${{ secrets.MAVEN_CENTRAL_TOKEN }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }} + + - name: Recompile to 
create nuget packages + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln - name: Clear documentation folder run: Remove-Item .\docs\* -Recurse -Force -Exclude _config.yml @@ -34,5 +58,5 @@ jobs: - name: Build documentation run: | choco install docfx - cd src\Documentation + cd src\net\Documentation docfx diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 78a2170f8d..9186919193 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -27,8 +27,32 @@ jobs: with: submodules: 'true' - - name: Build NuGet Packages - run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\KafkaBridge.sln + - name: Pre compile + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln + + - name: Set up Apache Maven Central + uses: actions/setup-java@v1 + with: # running setup-java again overwrites the settings.xml + java-version: 11 + server-id: ossrh # Value of the distributionManagement/repository/id field of the pom.xml + server-username: MAVEN_USERNAME # env variable for username in deploy + server-password: MAVEN_CENTRAL_TOKEN # env variable for token in deploy + gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} # Value of the GPG private key to import + gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase + + - name: Install local file to be used within Javadoc plugin of generated POM + run: mvn install:install-file -DgroupId=JCOBridge -DartifactId=JCOBridge -Dversion=2.4.3 -Dpackaging=jar -Dfile=./bin/net5.0/JCOBridge.jar + shell: bash + + - name: Create Jars + run: mvn --file ./src/java/kafkabridge/pom.xml package + env: + MAVEN_USERNAME: ${{ secrets.MAVEN_USERNAME }} + MAVEN_CENTRAL_TOKEN: ${{ secrets.MAVEN_CENTRAL_TOKEN }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }} + + - name: Recompile to create nuget packages + run: dotnet build --no-incremental --configuration Release /p:Platform="Any CPU" src\net\KafkaBridge.sln - uses: nuget/setup-nuget@v1 with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3f218fa980..09911bce91 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,9 +14,15 @@ The project is organized in this folder structure: * **docs** (website) * **src** - * **KafkaBridge** (The folder containing the source and project of the reflector) + * **config**: contains the configuration files copied from the official Apache Kafka distribution + * **java** + * **kafkabridge**: contains the JVM-side implementation of some classes managed from the .NET side; it is structured as a complete Maven project + * **net** + * **KafkaBridge**: The folder containing the source and project of the Apache Kafka files ported to .NET + * **KafkaBridgeCLI**: The folder containing the source and project of the CLI for Apache Kafka + * **templates**: The folder containing the source and project to generate the NuGet template package * **tests** - * **KafkaBridgeTest** (The folder containing the source and project of the KafkaBridge test) + * **KafkaBridgeTest**: The folder containing the source and project of the KafkaBridge test # How Can I Contribute?
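A note on the Maven Central wiring used by all three workflows above: as the inline comment states, the `server-id: ossrh` value given to `actions/setup-java` must match the `distributionManagement/repository/id` field of the project's `pom.xml`. A minimal sketch of the corresponding POM fragment follows; the repository URL shown is the conventional OSSRH staging endpoint and is an assumption, not taken from this repository:

```xml
<!-- Sketch only: <id> must match the server-id passed to actions/setup-java. -->
<!-- The URL is the conventional OSSRH endpoint (assumption, not from this repo). -->
<distributionManagement>
  <repository>
    <id>ossrh</id>
    <url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
  </repository>
</distributionManagement>
```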
diff --git a/README.md b/README.md index 2e08b47408..3b7b236a8e 100644 --- a/README.md +++ b/README.md @@ -11,8 +11,8 @@ This project adheres to the Contributor [Covenant code of conduct](CODE_OF_CONDU This project aims to create a library able to directly access the features available in the [Apache Kafka binary distribution](https://kafka.apache.org/downloads). There are many client libraries written to manage communication with Apache Kafka. Conversely, this project directly uses the Java packages released by the Apache Foundation, giving more than one benefit: * all implemented features are available; -* avoids protocol implementation from any third party; -* can access any feature made available. +* avoids protocol implementation from any third party; +* can access any feature made available from Apache Kafka: one of the most important ones is Kafka Streams, which does not have any C# implementation. The benefits come from two main points related to JCOBridge: * its ability to manage direct access to the JVM from any .NET application: any Java / Scala class behind Apache Kafka can be directly managed; * using the dynamic code feature of JCOBridge it is possible to seamlessly write Java/Scala/Kotlin/etc. code directly inside a standard .NET application written in C#/VB.NET diff --git a/src/Documentation/articles/actualstate.md b/src/Documentation/articles/actualstate.md deleted file mode 100644 index fb443243fa..0000000000 --- a/src/Documentation/articles/actualstate.md +++ /dev/null @@ -1,8 +0,0 @@ -# KafkaBridge development state - -This release comes with few ready made classes: - -* The command line interface classes (i.e. the executables Apache Kafka classes), the ones available under the _bin_ folder of any Apache Kafka binary distribution, can be managed using the KafkaBridgeCLI, e.g. ConsoleConsumer, ConsoleProducer and so on. -* Basic Producer/Consumer classes -* Kafka Admin Client with basic topic creation/deletion and queries to the cluster -* Light version of Kafka stream diff --git a/src/Documentation/articles/usage.md b/src/Documentation/articles/usage.md deleted file mode 100644 index 8fbac596cd..0000000000 --- a/src/Documentation/articles/usage.md +++ /dev/null @@ -1,3 +0,0 @@ -# KafkaBridge usage - -TBD \ No newline at end of file diff --git a/src/Documentation/articles/usageCLI.md b/src/Documentation/articles/usageCLI.md deleted file mode 100644 index 8a175b244e..0000000000 --- a/src/Documentation/articles/usageCLI.md +++ /dev/null @@ -1,10 +0,0 @@ -# KafkaBridgeCLI usage - -To use the CLI interface (KafkaBridgeCLI) runs a command like the following: - -> KafkaBridgeCLI -ClassToRun ConsoleConsumer --bootstrap-server SERVER-ADDRESS:9093 --topic topic_name --from-beginning - -KafkaBridgeCLI accepts the following command-line switch: - -* **ClassToRun**: represents the class to be launched -* **KafkaLocation**: represents the path to the root folder of Apache Kafka binary distribution; default value consider that KafkaBridgeCLI is installed within the bin folder of Apache Kafka binary distribution; \ No newline at end of file diff --git a/src/KafkaBridge/BridgedClasses/Common/Config/TopicConfig.cs b/src/KafkaBridge/BridgedClasses/Common/Config/TopicConfig.cs deleted file mode 100644 index 06daf93105..0000000000 --- a/src/KafkaBridge/BridgedClasses/Common/Config/TopicConfig.cs +++ /dev/null @@ -1,26 +0,0 @@ -/* -* Copyright 2021 MASES s.r.l.
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* Refer to LICENSE for more information. -*/ - -namespace MASES.KafkaBridge.Common.Config -{ - public class TopicConfig : JCOBridge.C2JBridge.JVMBridgeBase - { - public override bool IsStatic => true; - public override string ClassName => "org.apache.kafka.common.config.TopicConfig"; - } -} diff --git a/src/config/README.md b/src/config/README.md new file mode 100644 index 0000000000..970bb563a6 --- /dev/null +++ b/src/config/README.md @@ -0,0 +1,3 @@ +# Configuration files + +This folder contains a copy of the official configuration files from the Apache Kafka distribution \ No newline at end of file diff --git a/src/config/connect-console-sink.properties b/src/config/connect-console-sink.properties new file mode 100644 index 0000000000..e240a8f0dd --- /dev/null +++ b/src/config/connect-console-sink.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name=local-console-sink +connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector +tasks.max=1 +topics=connect-test \ No newline at end of file diff --git a/src/config/connect-console-source.properties b/src/config/connect-console-source.properties new file mode 100644 index 0000000000..d0e20690e7 --- /dev/null +++ b/src/config/connect-console-source.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +name=local-console-source +connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector +tasks.max=1 +topic=connect-test \ No newline at end of file diff --git a/src/config/connect-distributed.properties b/src/config/connect-distributed.properties new file mode 100644 index 0000000000..cedad9a682 --- /dev/null +++ b/src/config/connect-distributed.properties @@ -0,0 +1,89 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended +# to be used with the examples, and some settings may differ from those used in a production system, especially +# the `bootstrap.servers` and those specifying replication factors. + +# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. +bootstrap.servers=localhost:9092 + +# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs +group.id=connect-cluster + +# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will +# need to configure these based on the format they want their data in when loaded from or stored into Kafka +key.converter=org.apache.kafka.connect.json.JsonConverter +value.converter=org.apache.kafka.connect.json.JsonConverter +# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply +# it to +key.converter.schemas.enable=true +value.converter.schemas.enable=true + +# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted. +# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. +# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +offset.storage.topic=connect-offsets +offset.storage.replication.factor=1 +#offset.storage.partitions=25 + +# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, +# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. 
+# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +config.storage.topic=connect-configs +config.storage.replication.factor=1 + +# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted. +# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. +# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +status.storage.topic=connect-status +status.storage.replication.factor=1 +#status.storage.partitions=5 + +# Flush much faster than normal, which is useful for testing/debugging +offset.flush.interval.ms=10000 + +# List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS. +# Specify hostname as 0.0.0.0 to bind to all interfaces. +# Leave hostname empty to bind to default interface. +# Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084" +#listeners=HTTP://:8083 + +# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers. +# If not set, it uses the value for "listeners" if configured. +#rest.advertised.host.name= +#rest.advertised.port= +#rest.advertised.listener= + +# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins +# (connectors, converters, transformations). The list should consist of top level directories that include +# any combination of: +# a) directories immediately containing jars with plugins and their dependencies +# b) uber-jars with plugins and their dependencies +# c) directories immediately containing the package directory structure of classes of plugins and their dependencies +# Examples: +# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, +#plugin.path= diff --git a/src/config/connect-file-sink.properties b/src/config/connect-file-sink.properties new file mode 100644 index 0000000000..594ccc6e95 --- /dev/null +++ b/src/config/connect-file-sink.properties @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name=local-file-sink +connector.class=FileStreamSink +tasks.max=1 +file=test.sink.txt +topics=connect-test \ No newline at end of file diff --git a/src/config/connect-file-source.properties b/src/config/connect-file-source.properties new file mode 100644 index 0000000000..599cf4cb2a --- /dev/null +++ b/src/config/connect-file-source.properties @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name=local-file-source +connector.class=FileStreamSource +tasks.max=1 +file=test.txt +topic=connect-test \ No newline at end of file diff --git a/src/config/connect-log4j.properties b/src/config/connect-log4j.properties new file mode 100644 index 0000000000..157d5931b6 --- /dev/null +++ b/src/config/connect-log4j.properties @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +log4j.rootLogger=INFO, stdout, connectAppender + +# Send the logs to the console. +# +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + +# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the +# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed +# and copied in the same directory but with a filename that ends in the `DatePattern` option. +# +log4j.appender.connectAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.connectAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log +log4j.appender.connectAppender.layout=org.apache.log4j.PatternLayout + +# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information +# in the log messages, where appropriate. This makes it easier to identify those log messages that apply to a +# specific connector. 
+# +connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n + +log4j.appender.stdout.layout.ConversionPattern=${connect.log.pattern} +log4j.appender.connectAppender.layout.ConversionPattern=${connect.log.pattern} + +log4j.logger.org.apache.zookeeper=ERROR +log4j.logger.org.reflections=ERROR diff --git a/src/config/connect-mirror-maker.properties b/src/config/connect-mirror-maker.properties new file mode 100644 index 0000000000..40afda5e4a --- /dev/null +++ b/src/config/connect-mirror-maker.properties @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.consumer.ConsumerConfig for more details + +# Sample MirrorMaker 2.0 top-level configuration file +# Run with ./bin/connect-mirror-maker.sh connect-mirror-maker.properties + +# specify any number of cluster aliases +clusters = A, B + +# connection information for each cluster +# This is a comma separated host:port pairs for each cluster +# for e.g. "A_host1:9092, A_host2:9092, A_host3:9092" +A.bootstrap.servers = A_host1:9092, A_host2:9092, A_host3:9092 +B.bootstrap.servers = B_host1:9092, B_host2:9092, B_host3:9092 + +# enable and configure individual replication flows +A->B.enabled = true + +# regex which defines which topics gets replicated. For eg "foo-.*" +A->B.topics = .* + +B->A.enabled = true +B->A.topics = .* + +# Setting replication factor of newly created remote topics +replication.factor=1 + +############################# Internal Topic Settings ############################# +# The replication factor for mm2 internal topics "heartbeats", "B.checkpoints.internal" and +# "mm2-offset-syncs.B.internal" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +checkpoints.topic.replication.factor=1 +heartbeats.topic.replication.factor=1 +offset-syncs.topic.replication.factor=1 + +# The replication factor for connect internal topics "mm2-configs.B.internal", "mm2-offsets.B.internal" and +# "mm2-status.B.internal" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offset.storage.replication.factor=1 +status.storage.replication.factor=1 +config.storage.replication.factor=1 + +# customize as needed +# replication.policy.separator = _ +# sync.topic.acls.enabled = false +# emit.heartbeats.interval.seconds = 5 diff --git a/src/config/connect-standalone.properties b/src/config/connect-standalone.properties new file mode 100644 index 0000000000..a340a3bf31 --- /dev/null +++ b/src/config/connect-standalone.properties @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements.
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These are defaults. This file just demonstrates how to override some settings. +bootstrap.servers=localhost:9092 + +# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will +# need to configure these based on the format they want their data in when loaded from or stored into Kafka +key.converter=org.apache.kafka.connect.json.JsonConverter +value.converter=org.apache.kafka.connect.json.JsonConverter +# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply +# it to +key.converter.schemas.enable=true +value.converter.schemas.enable=true + +offset.storage.file.filename=/tmp/connect.offsets +# Flush much faster than normal, which is useful for testing/debugging +offset.flush.interval.ms=10000 + +# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins +# (connectors, converters, transformations). The list should consist of top level directories that include +# any combination of: +# a) directories immediately containing jars with plugins and their dependencies +# b) uber-jars with plugins and their dependencies +# c) directories immediately containing the package directory structure of classes of plugins and their dependencies +# Note: symlinks will be followed to discover dependencies or plugins. +# Examples: +# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, +#plugin.path= diff --git a/src/config/consumer.properties b/src/config/consumer.properties new file mode 100644 index 0000000000..01bb12eb08 --- /dev/null +++ b/src/config/consumer.properties @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.consumer.ConsumerConfig for more details + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... 
+bootstrap.servers=localhost:9092 + +# consumer group id +group.id=test-consumer-group + +# What to do when there is no initial offset in Kafka or if the current +# offset does not exist any more on the server: latest, earliest, none +#auto.offset.reset= diff --git a/src/config/kraft/README.md b/src/config/kraft/README.md new file mode 100644 index 0000000000..88de71874c --- /dev/null +++ b/src/config/kraft/README.md @@ -0,0 +1,173 @@ +KRaft (aka KIP-500) mode Preview Release +========================================================= + +# Introduction +It is now possible to run Apache Kafka without Apache ZooKeeper! We call this the [Kafka Raft metadata mode](https://cwiki.apache.org/confluence/display/KAFKA/KIP-500%3A+Replace+ZooKeeper+with+a+Self-Managed+Metadata+Quorum), typically shortened to `KRaft mode`. +`KRaft` is intended to be pronounced like `craft` (as in `craftsmanship`). It is currently *PREVIEW AND SHOULD NOT BE USED IN PRODUCTION*, but it +is available for testing in the Kafka 3.0 release. + +When the Kafka cluster is in KRaft mode, it does not store its metadata in ZooKeeper. In fact, you do not have to run ZooKeeper at all, because it stores its metadata in a KRaft quorum of controller nodes. + +KRaft mode has many benefits -- some obvious, and some not so obvious. Clearly, it is nice to manage and configure one service rather than two services. In addition, you can now run a single process Kafka cluster. +Most important of all, KRaft mode is more scalable. We expect to be able to [support many more topics and partitions](https://www.confluent.io/kafka-summit-san-francisco-2019/kafka-needs-no-keeper/) in this mode. + +# Quickstart + +## Warning +KRaft mode in Kafka 3.0 is provided for testing only, *NOT* for production. We do not yet support upgrading existing ZooKeeper-based Kafka clusters into this mode. In fact, when Kafka 3.1 is released, +it may not be possible to upgrade your KRaft clusters from 3.0 to 3.1. There may be bugs, including serious ones. You should *assume that your data could be lost at any time* if you try the preview release of KRaft mode. + +## Generate a cluster ID +The first step is to generate an ID for your new cluster, using the kafka-storage tool: + +~~~~ +$ ./bin/kafka-storage.sh random-uuid +xtzWWN4bTjitpL3kfd9s5g +~~~~ + +## Format Storage Directories +The next step is to format your storage directories. If you are running in single-node mode, you can do this with one command: + +~~~~ +$ ./bin/kafka-storage.sh format -t <uuid> -c ./config/kraft/server.properties +Formatting /tmp/kraft-combined-logs +~~~~ + +If you are using multiple nodes, then you should run the format command on each node. Be sure to use the same cluster ID for each one. + +## Start the Kafka Server +Finally, you are ready to start the Kafka server on each node.
+ +~~~~ +$ ./bin/kafka-server-start.sh ./config/kraft/server.properties +[2021-02-26 15:37:11,071] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) +[2021-02-26 15:37:11,294] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) +[2021-02-26 15:37:11,466] INFO [Log partition=__cluster_metadata-0, dir=/tmp/kraft-combined-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-02-26 15:37:11,509] INFO [raft-expiration-reaper]: Starting (kafka.raft.TimingWheelExpirationService$ExpiredOperationReaper) +[2021-02-26 15:37:11,640] INFO [RaftManager nodeId=1] Completed transition to Unattached(epoch=0, voters=[1], electionTimeoutMs=9037) (org.apache.kafka.raft.QuorumState) +... +~~~~ + +Just like with a ZooKeeper based broker, you can connect to port 9092 (or whatever port you configured) to perform administrative operations or produce or consume data. + +~~~~ +$ ./bin/kafka-topics.sh --create --topic foo --partitions 1 --replication-factor 1 --bootstrap-server localhost:9092 +Created topic foo. +~~~~ + +# Deployment + +## Controller Servers +In KRaft mode, only a small group of specially selected servers can act as controllers (unlike the ZooKeeper-based mode, where any server can become the +Controller). The specially selected controller servers will participate in the metadata quorum. Each controller server is either active, or a hot +standby for the current active controller server. + +You will typically select 3 or 5 servers for this role, depending on factors like cost and the number of concurrent failures your system should withstand +without availability impact. Just like with ZooKeeper, you must keep a majority of the controllers alive in order to maintain availability. So if you have 3 +controllers, you can tolerate 1 failure; with 5 controllers, you can tolerate 2 failures. + +## Process Roles +Each Kafka server now has a new configuration key called `process.roles` which can have the following values: + +* If `process.roles` is set to `broker`, the server acts as a broker in KRaft mode. +* If `process.roles` is set to `controller`, the server acts as a controller in KRaft mode. +* If `process.roles` is set to `broker,controller`, the server acts as both a broker and a controller in KRaft mode. +* If `process.roles` is not set at all then we are assumed to be in ZooKeeper mode. As mentioned earlier, you can't currently transition back and forth between ZooKeeper mode and KRaft mode without reformatting. + +Nodes that act as both brokers and controllers are referred to as "combined" nodes. Combined nodes are simpler to operate for simple use cases and allow you to avoid +some fixed memory overheads associated with JVMs. The key disadvantage is that the controller will be less isolated from the rest of the system. For example, if activity on the broker causes an out of +memory condition, the controller part of the server is not isolated from that OOM condition. + +## Quorum Voters +All nodes in the system must set the `controller.quorum.voters` configuration. This identifies the quorum controller servers that should be used. All the controllers must be enumerated. +This is similar to how, when using ZooKeeper, the `zookeeper.connect` configuration must contain all the ZooKeeper servers. Unlike with the ZooKeeper config, however, `controller.quorum.voters` +also has IDs for each node. 
The format is id1@host1:port1,id2@host2:port2, etc. + +So if you have 10 brokers and 3 controllers named controller1, controller2, controller3, you might have the following configuration on controller1: +``` +process.roles=controller +node.id=1 +listeners=CONTROLLER://controller1.example.com:9093 +controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093 +``` + +Each broker and each controller must set `controller.quorum.voters`. Note that the node ID supplied in the `controller.quorum.voters` configuration must match that supplied to the server. +So on controller1, node.id must be set to 1, and so forth. Note that there is no requirement for controller IDs to start at 0 or 1. However, the easiest and least confusing way to allocate +node IDs is probably just to give each server a numeric ID, starting from 0. + +Note that clients never need to configure `controller.quorum.voters`; only servers do. + +## Kafka Storage Tool +As described above in the QuickStart section, you must use the `kafka-storage.sh` tool to generate a cluster ID for your new cluster, and then run the format command on each node before starting the node. + +This is different from how Kafka has operated in the past. Previously, Kafka would format blank storage directories automatically, and also generate a new cluster UUID automatically. One reason for the change +is that auto-formatting can sometimes obscure an error condition. For example, under UNIX, if a data directory can't be mounted, it may show up as blank. In this case, auto-formatting would be the wrong thing to do. + +This is particularly important for the metadata log maintained by the controller servers. If two controllers out of three controllers were able to start with blank logs, a leader might be able to be elected with +nothing in the log, which would cause all metadata to be lost. + +# Missing Features +We don't support any kind of upgrade right now, either to or from KRaft mode. This is an important gap that we are working on. + +Finally, the following Kafka features have not yet been fully implemented: + +* Support for certain security features: configuring a KRaft-based Authorizer, setting up SCRAM, delegation tokens, and so forth + (although note that you can use authorizers such as `kafka.security.authorizer.AclAuthorizer` with KRaft clusters, even + if they are ZooKeeper-based: simply define `authorizer.class.name` and configure the authorizer as you normally would). +* Support for some configurations, like enabling unclean leader election by default or dynamically changing broker endpoints +* Support for KIP-112 "JBOD" modes + +We've tried to make it clear when a feature is not supported in the preview release, but you may encounter some rough edges. We will cover these feature gaps incrementally in the `trunk` branch. + +# Debugging +If you encounter an issue, you might want to take a look at the metadata log. 
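Before turning to the tools, it helps to know where the metadata log lives: the cluster metadata partition is a directory under the configured `log.dirs`. A quick way to locate its segment files before dumping them (the path below assumes the combined-mode default `log.dirs=/tmp/kraft-combined-logs` used elsewhere in this README):

~~~~
$ ls /tmp/kraft-combined-logs/__cluster_metadata-0/
~~~~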
+ +## kafka-dump-log +One way to view the metadata log is with kafka-dump-log.sh tool, like so: + +~~~~ +$ ./bin/kafka-dump-log.sh --cluster-metadata-decoder --skip-record-metadata --files /tmp/kraft-combined-logs/__cluster_metadata-0/*.log +Dumping /tmp/kraft-combined-logs/__cluster_metadata-0/00000000000000000000.log +Starting offset: 0 +baseOffset: 0 lastOffset: 0 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: true position: 0 CreateTime: 1614382631640 size: 89 magic: 2 compresscodec: NONE crc: 1438115474 isvalid: true + +baseOffset: 1 lastOffset: 1 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 89 CreateTime: 1614382632329 size: 137 magic: 2 compresscodec: NONE crc: 1095855865 isvalid: true + payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"P3UFsWoNR-erL9PK98YLsA","brokerEpoch":0,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} +baseOffset: 2 lastOffset: 2 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 226 CreateTime: 1614382632453 size: 83 magic: 2 compresscodec: NONE crc: 455187130 isvalid: true + payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} +baseOffset: 3 lastOffset: 3 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 309 CreateTime: 1614382634484 size: 83 magic: 2 compresscodec: NONE crc: 4055692847 isvalid: true + payload: {"type":"FENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} +baseOffset: 4 lastOffset: 4 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: true position: 392 CreateTime: 1614382671857 size: 89 magic: 2 compresscodec: NONE crc: 1318571838 isvalid: true + +baseOffset: 5 lastOffset: 5 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 481 CreateTime: 1614382672440 size: 137 magic: 2 compresscodec: NONE crc: 841144615 isvalid: true + payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"RXRJu7cnScKRZOnWQGs86g","brokerEpoch":4,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} +baseOffset: 6 lastOffset: 6 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 618 CreateTime: 1614382672544 size: 83 magic: 2 compresscodec: NONE crc: 4155905922 isvalid: true + payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":4}} +baseOffset: 7 lastOffset: 8 count: 2 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 701 CreateTime: 1614382712158 size: 159 magic: 2 compresscodec: NONE crc: 3726758683 isvalid: true + payload: {"type":"TOPIC_RECORD","version":0,"data":{"name":"foo","topicId":"5zoAlv-xEh9xRANKXt1Lbg"}} + payload: 
{"type":"PARTITION_RECORD","version":0,"data":{"partitionId":0,"topicId":"5zoAlv-xEh9xRANKXt1Lbg","replicas":[1],"isr":[1],"removingReplicas":null,"addingReplicas":null,"leader":1,"leaderEpoch":0,"partitionEpoch":0}} +~~~~ + +## The Metadata Shell +Another tool for examining the metadata logs is the Kafka metadata shell. Just like the ZooKeeper shell, this allows you to inspect the metadata of the cluster. + +~~~~ +$ ./bin/kafka-metadata-shell.sh --snapshot /tmp/kraft-combined-logs/__cluster_metadata-0/00000000000000000000.log +>> ls / +brokers local metadataQuorum topicIds topics +>> ls /topics +foo +>> cat /topics/foo/0/data +{ + "partitionId" : 0, + "topicId" : "5zoAlv-xEh9xRANKXt1Lbg", + "replicas" : [ 1 ], + "isr" : [ 1 ], + "removingReplicas" : null, + "addingReplicas" : null, + "leader" : 1, + "leaderEpoch" : 0, + "partitionEpoch" : 0 +} +>> exit +~~~~ diff --git a/src/config/kraft/broker.properties b/src/config/kraft/broker.properties new file mode 100644 index 0000000000..dfbd6eca66 --- /dev/null +++ b/src/config/kraft/broker.properties @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker + +# The node id associated with this instance's roles +node.id=2 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://localhost:9092 +inter.broker.listener.name=PLAINTEXT + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +advertised.listeners=PLAINTEXT://localhost:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-broker-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/config/kraft/controller.properties b/src/config/kraft/controller.properties new file mode 100644 index 0000000000..30fe3e78f3 --- /dev/null +++ b/src/config/kraft/controller.properties @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://:9093 + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=PLAINTEXT + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/raft-controller-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/config/kraft/server.properties b/src/config/kraft/server.properties new file mode 100644 index 0000000000..8e6406c3b4 --- /dev/null +++ b/src/config/kraft/server.properties @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://:9092,CONTROLLER://:9093 +inter.broker.listener.name=PLAINTEXT + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +advertised.listeners=PLAINTEXT://localhost:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-combined-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/config/log4j.properties b/src/config/log4j.properties new file mode 100644 index 0000000000..4cbce9d104 --- /dev/null +++ b/src/config/log4j.properties @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Unspecified loggers and loggers with additivity=true output to server.log and stdout +# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise +log4j.rootLogger=INFO, stdout, kafkaAppender + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log +log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log +log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log +log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log +log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log +log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.controllerAppender.layout.ConversionPattern=[%d] 
%p %m (%c)%n + +log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log +log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +# Change the line below to adjust ZK client logging +log4j.logger.org.apache.zookeeper=INFO + +# Change the two lines below to adjust the general broker logging level (output to server.log and stdout) +log4j.logger.kafka=INFO +log4j.logger.org.apache.kafka=INFO + +# Change to DEBUG or TRACE to enable request logging +log4j.logger.kafka.request.logger=WARN, requestAppender +log4j.additivity.kafka.request.logger=false + +# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output +# related to the handling of requests +#log4j.logger.kafka.network.Processor=TRACE, requestAppender +#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender +#log4j.additivity.kafka.server.KafkaApis=false +log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender +log4j.additivity.kafka.network.RequestChannel$=false + +log4j.logger.kafka.controller=TRACE, controllerAppender +log4j.additivity.kafka.controller=false + +log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender +log4j.additivity.kafka.log.LogCleaner=false + +log4j.logger.state.change.logger=INFO, stateChangeAppender +log4j.additivity.state.change.logger=false + +# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses +log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender +log4j.additivity.kafka.authorizer.logger=false + diff --git a/src/config/producer.properties b/src/config/producer.properties new file mode 100644 index 0000000000..4786b988a2 --- /dev/null +++ b/src/config/producer.properties @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.producer.ProducerConfig for more details + +############################# Producer Basics ############################# + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... 
+bootstrap.servers=localhost:9092 + +# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd +compression.type=none + +# name of the partitioner class for partitioning events; default partition spreads data randomly +#partitioner.class= + +# the maximum amount of time the client will wait for the response of a request +#request.timeout.ms= + +# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for +#max.block.ms= + +# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together +#linger.ms= + +# the maximum size of a request in bytes +#max.request.size= + +# the default batch size in bytes when batching multiple records sent to a partition +#batch.size= + +# the total bytes of memory the producer can use to buffer records waiting to be sent to the server +#buffer.memory= diff --git a/src/config/server.properties b/src/config/server.properties new file mode 100644 index 0000000000..b1cf5c4541 --- /dev/null +++ b/src/config/server.properties @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# see kafka.server.KafkaConfig for additional details and defaults + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +broker.id=0 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +#listeners=PLAINTEXT://:9092 + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kafka-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. 
+#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zookeeper.connection.timeout.ms=18000 + + +############################# Group Coordinator Settings ############################# + +# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. +# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. +# The default value for this is 3 seconds. +# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. +# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. +group.initial.rebalance.delay.ms=0 diff --git a/src/config/tools-log4j.properties b/src/config/tools-log4j.properties new file mode 100644 index 0000000000..b19e343265 --- /dev/null +++ b/src/config/tools-log4j.properties @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +log4j.rootLogger=WARN, stderr + +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n +log4j.appender.stderr.Target=System.err diff --git a/src/config/trogdor.conf b/src/config/trogdor.conf new file mode 100644 index 0000000000..320cbe7560 --- /dev/null +++ b/src/config/trogdor.conf @@ -0,0 +1,25 @@ +{ + "_comment": [ + "Licensed to the Apache Software Foundation (ASF) under one or more", + "contributor license agreements. See the NOTICE file distributed with", + "this work for additional information regarding copyright ownership.", + "The ASF licenses this file to You under the Apache License, Version 2.0", + "(the \"License\"); you may not use this file except in compliance with", + "the License. 
You may obtain a copy of the License at", + "", + "http://www.apache.org/licenses/LICENSE-2.0", + "", + "Unless required by applicable law or agreed to in writing, software", + "distributed under the License is distributed on an \"AS IS\" BASIS,", + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "See the License for the specific language governing permissions and", + "limitations under the License." + ], + "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": { + "node0": { + "hostname": "localhost", + "trogdor.agent.port": 8888, + "trogdor.coordinator.port": 8889 + } + } +} diff --git a/src/config/zookeeper.properties b/src/config/zookeeper.properties new file mode 100644 index 0000000000..90f4332ec3 --- /dev/null +++ b/src/config/zookeeper.properties @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# the directory where the snapshot is stored. +dataDir=/tmp/zookeeper +# the port at which the clients will connect +clientPort=2181 +# disable the per-ip limit on the number of connections since this is a non-production config +maxClientCnxns=0 +# Disable the adminserver by default to avoid port conflicts. +# Set the port to something non-conflicting if choosing to enable this +admin.enableServer=false +# admin.serverPort=8080 diff --git a/src/java/.gitignore b/src/java/.gitignore new file mode 100644 index 0000000000..9c15d60733 --- /dev/null +++ b/src/java/.gitignore @@ -0,0 +1,20 @@ +# Compiled source # +################### +*.class + +# Intermediate files # +################### +*.classpath +*.filelist +*.manifest + +############### +# folder # +############### +/**/target/ +/**/DROP/ +/**/TEMP/ +/**/packages/ +/**/bin/ +/**/obj/ +_site \ No newline at end of file diff --git a/src/java/README.md b/src/java/README.md new file mode 100644 index 0000000000..8600c02054 --- /dev/null +++ b/src/java/README.md @@ -0,0 +1,3 @@ +# KafkaBridge JVM implementation + +This folder contains the project and the classes, used from the .NET side, that are not available within the Apache Kafka delivery.
\ No newline at end of file diff --git a/src/java/kafkabridge/pom.xml b/src/java/kafkabridge/pom.xml new file mode 100644 index 0000000000..47a2ad2d70 --- /dev/null +++ b/src/java/kafkabridge/pom.xml @@ -0,0 +1,256 @@ + + + 4.0.0 + + com.masesgroup + kafkabridge + mases.kafkabridge + Apache Kafka interface bridging implementation + https://github.com/masesgroup/KafkaBridge + 1.1.2.0 + + + + MIT License + http://www.opensource.org/licenses/mit-license.php + repo + + + + https://github.com/masesgroup/KafkaBridge/issues + GitHub Issues + + + https://github.com/masesgroup/KafkaBridge + scm:git:git://github.com/masesgroup/KafkaBridge.git + scm:git:git@github.com:masesgroup/KafkaBridge.git + + + + info@masesgroup.com + MASES Group + https://github.com/masesgroup + masesgroup + + + + 8 + 8 + ${basedir}/classpathfile.classpath + 3.0.0 + + + + ossrh + https://s01.oss.sonatype.org/content/repositories/snapshots + + + ossrh + Central Repository OSSRH + https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + 2.9 + + + build-classpath + generate-sources + + build-classpath + + + ${classpathfile} + + + + copy-dependencies + package + + copy-dependencies + + + ${basedir}/../../../bin/jars/ + false + false + true + + + + copy-installed + package + + copy + + + + + ${project.groupId} + ${project.artifactId} + ${project.version} + ${project.packaging} + + + ${basedir}/../../../bin/jars/ + true + + + + + + org.codehaus.gmaven + gmaven-plugin + 1.4 + + + generate-resources + + execute + + + + def file = new File(project.properties.classpathfile) + project.properties.originalClassPath = file.getText() + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + + -cp + ${originalClassPath}${path.separator}${basedir}/../../../bin/net5.0/JCOBridge.jar + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + **/target/ + **/*.xml + **/*.zip + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.9.1 + + + https://www.jcobridge.com/api-java + + true + 8 + + + JCOBridge + JCOBridge + 2.4.3 + + + + + + attach-javadocs + + jar + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + --pinentry-mode + loopback + + + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://s01.oss.sonatype.org/ + true + + + + + + + junit + junit + 4.11 + test + + + org.apache.kafka + kafka-clients + ${kafkaVersion} + + + org.apache.kafka + kafka-streams + ${kafkaVersion} + + + org.apache.kafka + kafka-tools + ${kafkaVersion} + + + org.apache.kafka + kafka_2.13 + ${kafkaVersion} + + + org.apache.kafka + connect-runtime + ${kafkaVersion} + + + \ No newline at end of file diff --git a/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImpl.java b/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImpl.java new file mode 100644 index 0000000000..2bbf318769 --- /dev/null +++ b/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImpl.java @@ -0,0 +1,41 @@ +/* + * MIT License + * + * Copyright (c) 2021 MASES s.r.l. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package org.mases.kafkabridge.streams.kstream; + +import org.apache.kafka.streams.kstream.KeyValueMapper; +import org.mases.jcobridge.*; + +public final class KeyValueMapperImpl extends JCListener implements KeyValueMapper { + public KeyValueMapperImpl(String key) throws JCNativeException { + super(key); + } + + @Override + public Object apply(Object key, Object value) { + raiseEvent("apply", key, value); + Object retVal = getReturnData(); + return retVal; + } +} diff --git a/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/PredicateImpl.java b/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/PredicateImpl.java new file mode 100644 index 0000000000..270cdc50bd --- /dev/null +++ b/src/java/kafkabridge/src/main/java/org/mases/kafkabridge/streams/kstream/PredicateImpl.java @@ -0,0 +1,42 @@ +/* + * MIT License + * + * Copyright (c) 2021 MASES s.r.l. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package org.mases.kafkabridge.streams.kstream; + +import org.mases.jcobridge.*; + +import org.apache.kafka.streams.kstream.Predicate; + +public final class PredicateImpl extends JCListener implements Predicate { + public PredicateImpl(String key) throws JCNativeException { + super(key); + } + + @Override + public boolean test(Object e1, Object e2) { + raiseEvent("test", e1, e2); + Object retVal = getReturnData(); + return (boolean) retVal; + } +} diff --git a/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImplTest.java b/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImplTest.java new file mode 100644 index 0000000000..83f2cab165 --- /dev/null +++ b/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/KeyValueMapperImplTest.java @@ -0,0 +1,20 @@ +package org.mases.kafkabridge.streams.kstream; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +/** + * Unit test for simple App. + */ +public class KeyValueMapperImplTest +{ + /** + * Rigorous Test :-) + */ + @Test + public void shouldAnswerWithTrue() + { + assertTrue( true ); + } +} \ No newline at end of file diff --git a/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/PredicateImplTest.java b/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/PredicateImplTest.java new file mode 100644 index 0000000000..020756faba --- /dev/null +++ b/src/java/kafkabridge/src/test/java/org/mases/kafkabridge/streams/kstream/PredicateImplTest.java @@ -0,0 +1,20 @@ +package org.mases.kafkabridge.streams.kstream; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +/** + * Unit test for simple App. + */ +public class PredicateImplTest +{ + /** + * Rigorous Test :-) + */ + @Test + public void shouldAnswerWithTrue() + { + assertTrue( true ); + } +} \ No newline at end of file diff --git a/src/net/.gitignore b/src/net/.gitignore new file mode 100644 index 0000000000..7cb35bc039 --- /dev/null +++ b/src/net/.gitignore @@ -0,0 +1,17 @@ +############### +# files # +############### +*.AssemblyAttributes + + +############### +# folder # +############### +/**/.vs/ +/**/target/ +/**/DROP/ +/**/TEMP/ +/**/packages/ +/**/bin/ +/**/obj/ +_site \ No newline at end of file diff --git a/src/Common/JCOB128x128.png b/src/net/Common/JCOB128x128.png similarity index 100% rename from src/Common/JCOB128x128.png rename to src/net/Common/JCOB128x128.png diff --git a/src/Common/KafkaBridge.snk b/src/net/Common/KafkaBridge.snk similarity index 100% rename from src/Common/KafkaBridge.snk rename to src/net/Common/KafkaBridge.snk diff --git a/src/Documentation/.gitignore b/src/net/Documentation/.gitignore similarity index 100% rename from src/Documentation/.gitignore rename to src/net/Documentation/.gitignore diff --git a/src/Documentation/api/.gitignore b/src/net/Documentation/api/.gitignore similarity index 100% rename from src/Documentation/api/.gitignore rename to src/net/Documentation/api/.gitignore diff --git a/src/Documentation/api/index.md b/src/net/Documentation/api/index.md similarity index 100% rename from src/Documentation/api/index.md rename to src/net/Documentation/api/index.md diff --git a/src/Documentation/apidoc/.gitignore b/src/net/Documentation/apidoc/.gitignore similarity index 100% rename from src/Documentation/apidoc/.gitignore rename to src/net/Documentation/apidoc/.gitignore diff --git a/src/net/Documentation/articles/actualstate.md 
b/src/net/Documentation/articles/actualstate.md new file mode 100644 index 0000000000..3d9475456c --- /dev/null +++ b/src/net/Documentation/articles/actualstate.md @@ -0,0 +1,8 @@ +# KafkaBridge development state + +This release comes with some ready-made classes: + +* The command-line interface classes (i.e. the executable Apache Kafka classes), the ones available under the _bin_ folder of any Apache Kafka binary distribution, can be managed using the [KafkaBridgeCLI](usageCLI.md), e.g. ConsoleConsumer, ConsoleProducer and so on. +* Producer/Consumer classes +* Kafka Admin Client with basic topic creation/deletion and queries to the cluster +* A light version of Kafka Streams (under development) diff --git a/src/Documentation/articles/intro.md b/src/net/Documentation/articles/intro.md similarity index 100% rename from src/Documentation/articles/intro.md rename to src/net/Documentation/articles/intro.md diff --git a/src/Documentation/articles/roadmap.md b/src/net/Documentation/articles/roadmap.md similarity index 100% rename from src/Documentation/articles/roadmap.md rename to src/net/Documentation/articles/roadmap.md diff --git a/src/Documentation/articles/toc.yml b/src/net/Documentation/articles/toc.yml similarity index 100% rename from src/Documentation/articles/toc.yml rename to src/net/Documentation/articles/toc.yml diff --git a/src/net/Documentation/articles/usage.md b/src/net/Documentation/articles/usage.md new file mode 100644 index 0000000000..094f7a915a --- /dev/null +++ b/src/net/Documentation/articles/usage.md @@ -0,0 +1,61 @@ +# KafkaBridge usage + +To use the KafkaBridge classes, the developer can write .NET code using the same classes available in the official Apache Kafka package. +A basic consumer looks like the following: + +```C# +using MASES.KafkaBridge; +using MASES.KafkaBridge.Clients.Consumer; +using MASES.KafkaBridge.Java.Util; +using System; + +namespace MASES.KafkaBridgeTemplate.KafkaBridgeConsumer +{ + class Program + { + const string theServer = "localhost:9092"; + const string theTopic = "myTopic"; + + static string serverToUse = theServer; + static string topicToUse = theTopic; + + static void Main(string[] args) + { + var appArgs = KafkaBridgeCore.ApplicationArgs; + + if (appArgs.Length != 0) + { + serverToUse = appArgs[0]; // read the override from the filtered application arguments + } + + Properties props = new Properties(); + props.Put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(ConsumerConfig.GROUP_ID_CONFIG, "test"); + props.Put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + props.Put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + props.Put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + props.Put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + + using (var consumer = new KafkaConsumer<string, string>(props)) + { + consumer.Subscribe(Collections.singleton(topicToUse)); + while (true) + { + var records = consumer.Poll((long)TimeSpan.FromMilliseconds(200).TotalMilliseconds); + foreach (var item in records) + { + Console.WriteLine($"Offset = {item.Offset}, Key = {item.Key}, Value = {item.Value}"); + } + } + } + } + } +} +``` + +The example above can be found in the [templates package](https://www.nuget.org/packages/MASES.KafkaBridge.Templates/). Its behavior is: +* during initialization it prepares the properties, +* creates a consumer using the properties, +* subscribes and starts consuming, +* when data are received, it logs the information to the console.
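+A minimal producer counterpart can be sketched as follows. The sketch assumes the ProducerConfig and KafkaProducer bridge classes introduced by this patch; the ProducerRecord class, the VALUE_SERIALIZER_CLASS_CONFIG field and the Send/Flush methods are assumed to mirror the official Java producer API and may differ in the released package:
+
+```C#
+using MASES.KafkaBridge;
+using MASES.KafkaBridge.Clients.Producer;
+using MASES.KafkaBridge.Java.Util;
+
+namespace MASES.KafkaBridgeTemplate.KafkaBridgeProducer
+{
+    class Program
+    {
+        static void Main(string[] args)
+        {
+            // prepare the properties, as done for the consumer above
+            Properties props = new Properties();
+            props.Put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+            props.Put(ProducerConfig.ACKS_CONFIG, "all");
+            props.Put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+            props.Put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+
+            // create the producer from the properties; the class is closeable, so using(...) applies
+            using (var producer = new KafkaProducer(props))
+            {
+                for (int i = 0; i < 10; i++)
+                {
+                    // ProducerRecord and Send are assumed to mirror org.apache.kafka.clients.producer
+                    var record = new ProducerRecord("myTopic", i.ToString(), $"value {i}");
+                    producer.Send(record);
+                }
+                producer.Flush(); // assumed flush, as in the Java API
+            }
+        }
+    }
+}
+```
+
+As with the consumer, the Properties instance drives the JVM-side client: only the serializer classes and the acks setting differ from the consumer configuration.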
+ diff --git a/src/net/Documentation/articles/usageCLI.md b/src/net/Documentation/articles/usageCLI.md new file mode 100644 index 0000000000..3e1f541857 --- /dev/null +++ b/src/net/Documentation/articles/usageCLI.md @@ -0,0 +1,42 @@ +# KafkaBridgeCLI usage + +To use the CLI interface (KafkaBridgeCLI), run a command like the following: + +> KafkaBridgeCLI -ClassToRun ConsoleConsumer --bootstrap-server SERVER-ADDRESS:9093 --topic topic_name --from-beginning + +KafkaBridgeCLI accepts the following command-line switches: + +* **ClassToRun**: represents the class to be launched; the list is: + * Administration: + * AclCommand + * BrokerApiVersionsCommand + * ConfigCommand + * ConsumerGroupCommand + * DelegationTokenCommand + * DeleteRecordsCommand + * FeatureCommand + * LeaderElectionCommand + * LogDirsCommand + * ReassignPartitionsCommand + * TopicCommand + * ZkSecurityMigrator + * Shell: + * MetadataShell + * Tools: + * ClusterTool + * ConsoleConsumer + * ConsoleProducer + * ConsumerPerformance + * DumpLogSegments + * GetOffsetShell + * MirrorMaker + * ProducerPerformance + * ReplicaVerificationTool + * StorageTool + * StreamsResetter + * TransactionsCommand + * VerifiableConsumer + * VerifiableProducer +* **KafkaLocation**: represents the path to the root folder of an Apache Kafka binary distribution; the default value assumes that KafkaBridgeCLI uses the Apache Kafka jars available under the jars folder prepared by the package; +* **ScalaVersion**: the Scala version to be used. The default version (_2.13.6_) is bound to the default Apache Kafka version available in the package; +* **Log4JConfiguration**: the log4j configuration file; the default uses the file within the package. \ No newline at end of file diff --git a/src/Documentation/docfx.json b/src/net/Documentation/docfx.json similarity index 98% rename from src/Documentation/docfx.json rename to src/net/Documentation/docfx.json index 931ed5e3f1..c371612d8e 100644 --- a/src/Documentation/docfx.json +++ b/src/net/Documentation/docfx.json @@ -60,7 +60,7 @@ } }, - "dest": "../../docs", + "dest": "../../../docs", "globalMetadataFiles": [], "fileMetadataFiles": [], "template": [ diff --git a/src/Documentation/images/logo.png b/src/net/Documentation/images/logo.png similarity index 100% rename from src/Documentation/images/logo.png rename to src/net/Documentation/images/logo.png diff --git a/src/Documentation/index.md b/src/net/Documentation/index.md similarity index 100% rename from src/Documentation/index.md rename to src/net/Documentation/index.md diff --git a/src/Documentation/toc.yml b/src/net/Documentation/toc.yml similarity index 100% rename from src/Documentation/toc.yml rename to src/net/Documentation/toc.yml diff --git a/src/KafkaBridge.sln b/src/net/KafkaBridge.sln similarity index 100% rename from src/KafkaBridge.sln rename to src/net/KafkaBridge.sln diff --git a/src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerConfig.cs b/src/net/KafkaBridge/ClientSide/Admin/AclCommand.cs similarity index 63% rename from src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerConfig.cs rename to src/net/KafkaBridge/ClientSide/Admin/AclCommand.cs index be3f7e4456..6121b9657b 100644 --- a/src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerConfig.cs +++ b/src/net/KafkaBridge/ClientSide/Admin/AclCommand.cs @@ -16,11 +16,19 @@ * Refer to LICENSE for more information.
*/ -namespace MASES.KafkaBridge.Clients.Consumer +namespace MASES.KafkaBridge.Admin { - public class ConsumerConfig : JCOBridge.C2JBridge.JVMBridgeBase<ConsumerConfig> + /// <summary> + /// Class managing AclCommand + /// </summary> + public class AclCommand : JCOBridge.C2JBridge.JVMBridgeMain<AclCommand> { - public override bool IsStatic => true; - public override string ClassName => "org.apache.kafka.clients.consumer.ConsumerConfig"; + /// <summary> + /// Initialize a new <see cref="AclCommand"/> + /// </summary> + public AclCommand() + : base("kafka.admin.AclCommand") + { + } } } diff --git a/src/KafkaBridge/Admin/BrokerApiVersionsCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/BrokerApiVersionsCommand.cs similarity index 97% rename from src/KafkaBridge/Admin/BrokerApiVersionsCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/BrokerApiVersionsCommand.cs index ac388685ba..fc3a96c130 100644 --- a/src/KafkaBridge/Admin/BrokerApiVersionsCommand.cs +++ b/src/net/KafkaBridge/ClientSide/Admin/BrokerApiVersionsCommand.cs @@ -16,8 +16,6 @@ * Refer to LICENSE for more information. */ -using MASES.JCOBridge.C2JBridge; - namespace MASES.KafkaBridge.Admin { /// <summary> diff --git a/src/KafkaBridge/Admin/ConfigCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/ConfigCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/ConfigCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/ConfigCommand.cs diff --git a/src/KafkaBridge/Admin/ConsumerGroupCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/ConsumerGroupCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/ConsumerGroupCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/ConsumerGroupCommand.cs diff --git a/src/KafkaBridge/Admin/DelegationTokenCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/DelegationTokenCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/DelegationTokenCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/DelegationTokenCommand.cs diff --git a/src/KafkaBridge/Admin/DeleteRecordsCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/DeleteRecordsCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/DeleteRecordsCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/DeleteRecordsCommand.cs diff --git a/src/net/KafkaBridge/ClientSide/Admin/FeatureCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/FeatureCommand.cs new file mode 100644 index 0000000000..1b2aa15628 --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/Admin/FeatureCommand.cs @@ -0,0 +1,34 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information.
+*/ + +namespace MASES.KafkaBridge.Admin +{ + /// <summary> + /// Class managing FeatureCommand + /// </summary> + public class FeatureCommand : JCOBridge.C2JBridge.JVMBridgeMain<FeatureCommand> + { + /// <summary> + /// Initialize a new <see cref="FeatureCommand"/> + /// </summary> + public FeatureCommand() + : base("kafka.admin.FeatureCommand") + { + } + } +} diff --git a/src/KafkaBridge/Admin/LeaderElectionCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/LeaderElectionCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/LeaderElectionCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/LeaderElectionCommand.cs diff --git a/src/KafkaBridge/Admin/LogDirsCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/LogDirsCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/LogDirsCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/LogDirsCommand.cs diff --git a/src/KafkaBridge/Admin/ReassignPartitionsCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/ReassignPartitionsCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/ReassignPartitionsCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/ReassignPartitionsCommand.cs diff --git a/src/KafkaBridge/Admin/TopicCommand.cs b/src/net/KafkaBridge/ClientSide/Admin/TopicCommand.cs similarity index 100% rename from src/KafkaBridge/Admin/TopicCommand.cs rename to src/net/KafkaBridge/ClientSide/Admin/TopicCommand.cs diff --git a/src/net/KafkaBridge/ClientSide/Admin/ZkSecurityMigrator.cs b/src/net/KafkaBridge/ClientSide/Admin/ZkSecurityMigrator.cs new file mode 100644 index 0000000000..f0f4b9713a --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/Admin/ZkSecurityMigrator.cs @@ -0,0 +1,34 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Admin +{ + /// <summary> + /// Class managing ZkSecurityMigrator + /// </summary> + public class ZkSecurityMigrator : JCOBridge.C2JBridge.JVMBridgeMain<ZkSecurityMigrator> + { + /// <summary> + /// Initialize a new <see cref="ZkSecurityMigrator"/> + /// </summary> + public ZkSecurityMigrator() + : base("kafka.admin.ZkSecurityMigrator") + { + } + } +} diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/AdminClientConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/AdminClientConfig.cs new file mode 100644 index 0000000000..a472849a51 --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/AdminClientConfig.cs @@ -0,0 +1,66 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information.
+*/ + +namespace MASES.KafkaBridge.Clients.Admin +{ + public class AdminClientConfig : JCOBridge.C2JBridge.JVMBridgeBase<AdminClientConfig> + { + public override bool IsStatic => true; + public override string ClassName => "org.apache.kafka.clients.admin.AdminClientConfig"; + + public static readonly string BOOTSTRAP_SERVERS_CONFIG = Clazz.GetField<string>("BOOTSTRAP_SERVERS_CONFIG"); + + public static readonly string CLIENT_DNS_LOOKUP_CONFIG = Clazz.GetField<string>("CLIENT_DNS_LOOKUP_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MS_CONFIG = Clazz.GetField<string>("RECONNECT_BACKOFF_MS_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MAX_MS_CONFIG = Clazz.GetField<string>("RECONNECT_BACKOFF_MAX_MS_CONFIG"); + + public static readonly string RETRY_BACKOFF_MS_CONFIG = Clazz.GetField<string>("RETRY_BACKOFF_MS_CONFIG"); + + public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG"); + + public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = Clazz.GetField<string>("SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG"); + + public static readonly string CONNECTIONS_MAX_IDLE_MS_CONFIG = Clazz.GetField<string>("CONNECTIONS_MAX_IDLE_MS_CONFIG"); + + public static readonly string REQUEST_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("REQUEST_TIMEOUT_MS_CONFIG"); + + public static readonly string CLIENT_ID_CONFIG = Clazz.GetField<string>("CLIENT_ID_CONFIG"); + + public static readonly string METADATA_MAX_AGE_CONFIG = Clazz.GetField<string>("METADATA_MAX_AGE_CONFIG"); + + public static readonly string SEND_BUFFER_CONFIG = Clazz.GetField<string>("SEND_BUFFER_CONFIG"); + + public static readonly string RECEIVE_BUFFER_CONFIG = Clazz.GetField<string>("RECEIVE_BUFFER_CONFIG"); + + public static readonly string METRIC_REPORTER_CLASSES_CONFIG = Clazz.GetField<string>("METRIC_REPORTER_CLASSES_CONFIG"); + + public static readonly string METRICS_NUM_SAMPLES_CONFIG = Clazz.GetField<string>("METRICS_NUM_SAMPLES_CONFIG"); + + public static readonly string METRICS_SAMPLE_WINDOW_MS_CONFIG = Clazz.GetField<string>("METRICS_SAMPLE_WINDOW_MS_CONFIG"); + + public static readonly string METRICS_RECORDING_LEVEL_CONFIG = Clazz.GetField<string>("METRICS_RECORDING_LEVEL_CONFIG"); + + public static readonly string SECURITY_PROTOCOL_CONFIG = Clazz.GetField<string>("SECURITY_PROTOCOL_CONFIG"); + public static readonly string DEFAULT_SECURITY_PROTOCOL = Clazz.GetField<string>("DEFAULT_SECURITY_PROTOCOL"); + + public static readonly string RETRIES_CONFIG = Clazz.GetField<string>("RETRIES_CONFIG"); + public static readonly string DEFAULT_API_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("DEFAULT_API_TIMEOUT_MS_CONFIG"); + } +} diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/CreateTopicsOptions.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/CreateTopicsOptions.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/CreateTopicsOptions.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/CreateTopicsOptions.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/CreateTopicsResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/CreateTopicsResult.cs similarity index 68% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/CreateTopicsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/CreateTopicsResult.cs index 329dd06664..7da5fc39df 100644 --- a/src/KafkaBridge/BridgedClasses/Clients/Admin/CreateTopicsResult.cs +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/CreateTopicsResult.cs @@ -16,11 +16,24 @@ * Refer to LICENSE for more information.
*/ +using MASES.KafkaBridge.Common; +using MASES.KafkaBridge.Java.Lang; +using MASES.KafkaBridge.Java.Util; + namespace MASES.KafkaBridge.Clients.Admin { public class CreateTopicsResult : JCOBridge.C2JBridge.JVMBridgeBase<CreateTopicsResult> { public override string ClassName => "org.apache.kafka.clients.admin.CreateTopicsResult"; + + public Map<string, KafkaFuture<Void>> Values => New<Map<string, KafkaFuture<Void>>>("values"); + + public KafkaFuture<Void> All => New<KafkaFuture<Void>>("all"); + + public KafkaFuture<Uuid> TopicId(string topic) + { + return New<KafkaFuture<Uuid>>(topic); + } } } diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/DeleteTopicsOptions.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DeleteTopicsOptions.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/DeleteTopicsOptions.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DeleteTopicsOptions.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/DeleteTopicsResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DeleteTopicsResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/DeleteTopicsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DeleteTopicsResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeClusterResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeClusterResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeClusterResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeClusterResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeConsumerGroupsResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeConsumerGroupsResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeConsumerGroupsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeConsumerGroupsResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeTopicsResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeTopicsResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/DescribeTopicsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/DescribeTopicsResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/ElectLeadersResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ElectLeadersResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/ElectLeadersResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ElectLeadersResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/KafkaAdminClient.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/KafkaAdminClient.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/KafkaAdminClient.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/KafkaAdminClient.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/ListConsumerGroupsResult.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ListConsumerGroupsResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/ListConsumerGroupsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ListConsumerGroupsResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/ListTopicsResult.cs
b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ListTopicsResult.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/ListTopicsResult.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/ListTopicsResult.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/NewTopic.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/NewTopic.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/NewTopic.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Admin/NewTopic.cs diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerConfig.cs new file mode 100644 index 0000000000..0232236338 --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerConfig.cs @@ -0,0 +1,118 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Clients.Consumer +{ + public class ConsumerConfig : JCOBridge.C2JBridge.JVMBridgeBase<ConsumerConfig> + { + public override bool IsStatic => true; + public override string ClassName => "org.apache.kafka.clients.consumer.ConsumerConfig"; + + public static readonly string GROUP_ID_CONFIG = Clazz.GetField<string>("GROUP_ID_CONFIG"); + + public static readonly string GROUP_INSTANCE_ID_CONFIG = Clazz.GetField<string>("GROUP_INSTANCE_ID_CONFIG"); + + public static readonly string MAX_POLL_RECORDS_CONFIG = Clazz.GetField<string>("MAX_POLL_RECORDS_CONFIG"); + + public static readonly string MAX_POLL_INTERVAL_MS_CONFIG = Clazz.GetField<string>("MAX_POLL_INTERVAL_MS_CONFIG"); + + public static readonly string SESSION_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("SESSION_TIMEOUT_MS_CONFIG"); + + public static readonly string HEARTBEAT_INTERVAL_MS_CONFIG = Clazz.GetField<string>("HEARTBEAT_INTERVAL_MS_CONFIG"); + + public static readonly string BOOTSTRAP_SERVERS_CONFIG = Clazz.GetField<string>("BOOTSTRAP_SERVERS_CONFIG"); + + public static readonly string CLIENT_DNS_LOOKUP_CONFIG = Clazz.GetField<string>("CLIENT_DNS_LOOKUP_CONFIG"); + + public static readonly string ENABLE_AUTO_COMMIT_CONFIG = Clazz.GetField<string>("ENABLE_AUTO_COMMIT_CONFIG"); + + public static readonly string AUTO_COMMIT_INTERVAL_MS_CONFIG = Clazz.GetField<string>("AUTO_COMMIT_INTERVAL_MS_CONFIG"); + + public static readonly string PARTITION_ASSIGNMENT_STRATEGY_CONFIG = Clazz.GetField<string>("PARTITION_ASSIGNMENT_STRATEGY_CONFIG"); + + public static readonly string AUTO_OFFSET_RESET_CONFIG = Clazz.GetField<string>("AUTO_OFFSET_RESET_CONFIG"); + + public static readonly string FETCH_MIN_BYTES_CONFIG = Clazz.GetField<string>("FETCH_MIN_BYTES_CONFIG"); + + public static readonly string FETCH_MAX_BYTES_CONFIG = Clazz.GetField<string>("FETCH_MAX_BYTES_CONFIG"); + + public static readonly int DEFAULT_FETCH_MAX_BYTES = Clazz.GetField<int>("DEFAULT_FETCH_MAX_BYTES"); + + + public static readonly string FETCH_MAX_WAIT_MS_CONFIG = Clazz.GetField<string>("FETCH_MAX_WAIT_MS_CONFIG"); + + public static readonly string METADATA_MAX_AGE_CONFIG = Clazz.GetField<string>("METADATA_MAX_AGE_CONFIG"); + + public static readonly string MAX_PARTITION_FETCH_BYTES_CONFIG = Clazz.GetField<string>("MAX_PARTITION_FETCH_BYTES_CONFIG"); + + public static readonly int DEFAULT_MAX_PARTITION_FETCH_BYTES = Clazz.GetField<int>("DEFAULT_MAX_PARTITION_FETCH_BYTES"); + + public static readonly string SEND_BUFFER_CONFIG = Clazz.GetField<string>("SEND_BUFFER_CONFIG"); + + public static readonly string RECEIVE_BUFFER_CONFIG = Clazz.GetField<string>("RECEIVE_BUFFER_CONFIG"); + + public static readonly string CLIENT_ID_CONFIG = Clazz.GetField<string>("CLIENT_ID_CONFIG"); + + public static readonly string CLIENT_RACK_CONFIG = Clazz.GetField<string>("CLIENT_RACK_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MS_CONFIG = Clazz.GetField<string>("RECONNECT_BACKOFF_MS_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MAX_MS_CONFIG = Clazz.GetField<string>("RECONNECT_BACKOFF_MAX_MS_CONFIG"); + + public static readonly string RETRY_BACKOFF_MS_CONFIG = Clazz.GetField<string>("RETRY_BACKOFF_MS_CONFIG"); + + public static readonly string METRICS_SAMPLE_WINDOW_MS_CONFIG = Clazz.GetField<string>("METRICS_SAMPLE_WINDOW_MS_CONFIG"); + + public static readonly string METRICS_NUM_SAMPLES_CONFIG = Clazz.GetField<string>("METRICS_NUM_SAMPLES_CONFIG"); + + public static readonly string METRICS_RECORDING_LEVEL_CONFIG = Clazz.GetField<string>("METRICS_RECORDING_LEVEL_CONFIG"); + + public static readonly string METRIC_REPORTER_CLASSES_CONFIG = Clazz.GetField<string>("METRIC_REPORTER_CLASSES_CONFIG"); + + public static readonly string CHECK_CRCS_CONFIG = Clazz.GetField<string>("CHECK_CRCS_CONFIG"); + + public static readonly string KEY_DESERIALIZER_CLASS_CONFIG = Clazz.GetField<string>("KEY_DESERIALIZER_CLASS_CONFIG"); + + public static readonly string VALUE_DESERIALIZER_CLASS_CONFIG = Clazz.GetField<string>("VALUE_DESERIALIZER_CLASS_CONFIG"); + + public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG"); + + public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = Clazz.GetField<string>("SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG"); + + public static readonly string CONNECTIONS_MAX_IDLE_MS_CONFIG = Clazz.GetField<string>("CONNECTIONS_MAX_IDLE_MS_CONFIG"); + + public static readonly string REQUEST_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("REQUEST_TIMEOUT_MS_CONFIG"); + + public static readonly string DEFAULT_API_TIMEOUT_MS_CONFIG = Clazz.GetField<string>("DEFAULT_API_TIMEOUT_MS_CONFIG"); + + public static readonly string INTERCEPTOR_CLASSES_CONFIG = Clazz.GetField<string>("INTERCEPTOR_CLASSES_CONFIG"); + + public static readonly string EXCLUDE_INTERNAL_TOPICS_CONFIG = Clazz.GetField<string>("EXCLUDE_INTERNAL_TOPICS_CONFIG"); + + public static readonly bool DEFAULT_EXCLUDE_INTERNAL_TOPICS = Clazz.GetField<bool>("DEFAULT_EXCLUDE_INTERNAL_TOPICS"); + + public static readonly string ISOLATION_LEVEL_CONFIG = Clazz.GetField<string>("ISOLATION_LEVEL_CONFIG"); + public static readonly string DEFAULT_ISOLATION_LEVEL = Clazz.GetField<string>("DEFAULT_ISOLATION_LEVEL"); + + public static readonly string ALLOW_AUTO_CREATE_TOPICS_CONFIG = Clazz.GetField<string>("ALLOW_AUTO_CREATE_TOPICS_CONFIG"); + + public static readonly bool DEFAULT_ALLOW_AUTO_CREATE_TOPICS = Clazz.GetField<bool>("DEFAULT_ALLOW_AUTO_CREATE_TOPICS"); + + public static readonly string SECURITY_PROVIDERS_CONFIG = Clazz.GetField<string>("SECURITY_PROVIDERS_CONFIG"); + } +} diff --git a/src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerRecord.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerRecord.cs similarity index 83% rename from
src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerRecord.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerRecord.cs
index 3f4ac4cd86..6e31b2f2f3 100644
--- a/src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerRecord.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerRecord.cs
@@ -16,7 +16,9 @@
 * Refer to LICENSE for more information.
 */
 
+using MASES.JCOBridge.C2JBridge.JVMInterop;
 using MASES.KafkaBridge.Common.Header;
+using MASES.KafkaBridge.Common.Record;
 using System;
 
 namespace MASES.KafkaBridge.Clients.Consumer
@@ -41,7 +43,7 @@ public class ConsumerRecord<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<ConsumerRecord<K, V>>
 
         public long Timestamp => IExecute<long>("timestamp");
 
-        public object TimestampType => IExecute<object>("timestampType");
+        public TimestampType TimestampType => (TimestampType)Enum.Parse(typeof(TimestampType), IExecute<IJavaObject>("timestampType").Invoke<string>("name")); // (TimestampType)(int)IExecute<IJavaObject>("timestampType").GetField<int>("id");
 
         public int SerializedKeySize => IExecute<int>("serializedKeySize");
diff --git a/src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerRecords.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerRecords.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Clients/Consumer/ConsumerRecords.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/ConsumerRecords.cs
diff --git a/src/KafkaBridge/BridgedClasses/Clients/Consumer/KafkaConsumer.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/KafkaConsumer.cs
similarity index 97%
rename from src/KafkaBridge/BridgedClasses/Clients/Consumer/KafkaConsumer.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/KafkaConsumer.cs
index 9a33b559cd..5054a36fe6 100644
--- a/src/KafkaBridge/BridgedClasses/Clients/Consumer/KafkaConsumer.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Consumer/KafkaConsumer.cs
@@ -23,6 +23,8 @@ namespace MASES.KafkaBridge.Clients.Consumer
 {
     public class KafkaConsumer<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<KafkaConsumer<K, V>>
     {
+        public override bool IsCloseable => true;
+
         public override string ClassName => "org.apache.kafka.clients.consumer.KafkaConsumer";
 
         public KafkaConsumer()
diff --git a/src/KafkaBridge/BridgedClasses/Clients/Producer/KafkaProducer.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/KafkaProducer.cs
similarity index 97%
rename from src/KafkaBridge/BridgedClasses/Clients/Producer/KafkaProducer.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/KafkaProducer.cs
index 6bee4cc600..94d8bb01a9 100644
--- a/src/KafkaBridge/BridgedClasses/Clients/Producer/KafkaProducer.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/KafkaProducer.cs
@@ -18,11 +18,10 @@
 
 using MASES.KafkaBridge.Java.Util;
 using MASES.KafkaBridge.Java.Util.Concurrent;
-using System;
 
 namespace MASES.KafkaBridge.Clients.Producer
 {
-    public class KafkaProducer : JCOBridge.C2JBridge.JVMBridgeBase<KafkaProducer>, IDisposable
+    public class KafkaProducer : JCOBridge.C2JBridge.JVMBridgeBase<KafkaProducer>
     {
         public override bool IsCloseable => true;
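With IsCloseable overridden on both clients, disposing the bridge instance closes the underlying JVM client. As an orientation for reviewers, the snippet below is a minimal consumption sketch built only on the classes touched above; it is not part of the patch, and the Properties-based constructor plus the Put, Subscribe and Poll members are assumed here to mirror their java.util and Java-client counterparts.

    // Hypothetical usage sketch (not part of this patch).
    using MASES.KafkaBridge.Clients.Consumer;
    using MASES.KafkaBridge.Java.Util;

    var props = new Properties();
    props.Put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed Put wrapper over java.util.Properties.put
    props.Put(ConsumerConfig.GROUP_ID_CONFIG, "bridge-sample-group");

    using (var consumer = new KafkaConsumer<string, string>(props)) // Dispose closes the JVM consumer via IsCloseable
    {
        consumer.Subscribe(Collections.Singleton("myTopic")); // assumed bridge of subscribe(Collection)
        var records = consumer.Poll(1000);                    // assumed bridge of poll()
        foreach (var record in records)                       // ConsumerRecords is assumed to be enumerable
        {
            System.Console.WriteLine($"{record.Key}: {record.Value} ({record.TimestampType})");
        }
    }

diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerConfig.cs
new file mode 100644
index 0000000000..e312a21244
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerConfig.cs
@@ -0,0 +1,98 @@
+/*
+* Copyright 2021 MASES s.r.l.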
+* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Clients.Producer +{ + public class ProducerConfig : JCOBridge.C2JBridge.JVMBridgeBase + { + public override bool IsStatic => true; + public override string ClassName => "org.apache.kafka.clients.producer.ProducerConfig"; + + public static readonly string BOOTSTRAP_SERVERS_CONFIG = Clazz.GetField("BOOTSTRAP_SERVERS_CONFIG"); + + public static readonly string CLIENT_DNS_LOOKUP_CONFIG = Clazz.GetField("CLIENT_DNS_LOOKUP_CONFIG"); + + public static readonly string METADATA_MAX_AGE_CONFIG = Clazz.GetField("METADATA_MAX_AGE_CONFIG"); + + public static readonly string METADATA_MAX_IDLE_CONFIG = Clazz.GetField("METADATA_MAX_IDLE_CONFIG"); + + public static readonly string BATCH_SIZE_CONFIG = Clazz.GetField("BATCH_SIZE_CONFIG"); + + public static readonly string ACKS_CONFIG = Clazz.GetField("ACKS_CONFIG"); + + public static readonly string LINGER_MS_CONFIG = Clazz.GetField("LINGER_MS_CONFIG"); + + public static readonly string REQUEST_TIMEOUT_MS_CONFIG = Clazz.GetField("REQUEST_TIMEOUT_MS_CONFIG"); + + public static readonly string DELIVERY_TIMEOUT_MS_CONFIG = Clazz.GetField("DELIVERY_TIMEOUT_MS_CONFIG"); + + public static readonly string CLIENT_ID_CONFIG = Clazz.GetField("CLIENT_ID_CONFIG"); + + public static readonly string SEND_BUFFER_CONFIG = Clazz.GetField("SEND_BUFFER_CONFIG"); + + public static readonly string RECEIVE_BUFFER_CONFIG = Clazz.GetField("RECEIVE_BUFFER_CONFIG"); + + public static readonly string MAX_REQUEST_SIZE_CONFIG = Clazz.GetField("MAX_REQUEST_SIZE_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MS_CONFIG = Clazz.GetField("RECONNECT_BACKOFF_MS_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MAX_MS_CONFIG = Clazz.GetField("RECONNECT_BACKOFF_MAX_MS_CONFIG"); + + public static readonly string MAX_BLOCK_MS_CONFIG = Clazz.GetField("MAX_BLOCK_MS_CONFIG"); + + public static readonly string BUFFER_MEMORY_CONFIG = Clazz.GetField("BUFFER_MEMORY_CONFIG"); + + public static readonly string RETRY_BACKOFF_MS_CONFIG = Clazz.GetField("RETRY_BACKOFF_MS_CONFIG"); + + public static readonly string COMPRESSION_TYPE_CONFIG = Clazz.GetField("COMPRESSION_TYPE_CONFIG"); + + public static readonly string METRICS_SAMPLE_WINDOW_MS_CONFIG = Clazz.GetField("METRICS_SAMPLE_WINDOW_MS_CONFIG"); + + public static readonly string METRICS_NUM_SAMPLES_CONFIG = Clazz.GetField("METRICS_NUM_SAMPLES_CONFIG"); + + public static readonly string METRICS_RECORDING_LEVEL_CONFIG = Clazz.GetField("METRICS_RECORDING_LEVEL_CONFIG"); + + public static readonly string METRIC_REPORTER_CLASSES_CONFIG = Clazz.GetField("METRIC_REPORTER_CLASSES_CONFIG"); + + public static readonly string MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION = Clazz.GetField("MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION"); + + public static readonly string RETRIES_CONFIG = Clazz.GetField("RETRIES_CONFIG"); + + public static readonly string KEY_SERIALIZER_CLASS_CONFIG = Clazz.GetField("KEY_SERIALIZER_CLASS_CONFIG"); + + 
public static readonly string VALUE_SERIALIZER_CLASS_CONFIG = Clazz.GetField("VALUE_SERIALIZER_CLASS_CONFIG");
+
+        public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = Clazz.GetField("SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG");
+
+        public static readonly string SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = Clazz.GetField("SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG");
+
+        public static readonly string CONNECTIONS_MAX_IDLE_MS_CONFIG = Clazz.GetField("CONNECTIONS_MAX_IDLE_MS_CONFIG");
+
+        public static readonly string PARTITIONER_CLASS_CONFIG = Clazz.GetField("PARTITIONER_CLASS_CONFIG");
+
+        public static readonly string INTERCEPTOR_CLASSES_CONFIG = Clazz.GetField("INTERCEPTOR_CLASSES_CONFIG");
+
+        public static readonly string ENABLE_IDEMPOTENCE_CONFIG = Clazz.GetField("ENABLE_IDEMPOTENCE_CONFIG");
+
+        public static readonly string TRANSACTION_TIMEOUT_CONFIG = Clazz.GetField("TRANSACTION_TIMEOUT_CONFIG");
+
+        public static readonly string TRANSACTIONAL_ID_CONFIG = Clazz.GetField("TRANSACTIONAL_ID_CONFIG");
+
+        public static readonly string SECURITY_PROVIDERS_CONFIG = Clazz.GetField("SECURITY_PROVIDERS_CONFIG");
+    }
+}
diff --git a/src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerRecord.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerRecord.cs
similarity index 55%
rename from src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerRecord.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerRecord.cs
index 00beb0f939..466196801e 100644
--- a/src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerRecord.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/ProducerRecord.cs
@@ -16,6 +16,9 @@
 * Refer to LICENSE for more information.
 */
 
+using MASES.KafkaBridge.Common.Header;
+using System;
+
 namespace MASES.KafkaBridge.Clients.Producer
 {
     public class ProducerRecord<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<ProducerRecord<K, V>>
@@ -26,11 +29,31 @@ public ProducerRecord()
         {
         }
 
+        public ProducerRecord(string topic, int partition, long timestamp, K key, V value, Headers headers)
+            : base(topic, partition, timestamp, key, value, headers.Instance)
+        {
+        }
+
+        public ProducerRecord(string topic, int partition, DateTime timestamp, K key, V value, Headers headers)
+            : base(topic, partition, new DateTimeOffset(timestamp).ToUnixTimeMilliseconds(), key, value, headers.Instance)
+        {
+        }
+
         public ProducerRecord(string topic, int partition, long timestamp, K key, V value)
             : base(topic, partition, timestamp, key, value)
         {
         }
 
+        public ProducerRecord(string topic, int partition, DateTime timestamp, K key, V value)
+            : base(topic, partition, new DateTimeOffset(timestamp).ToUnixTimeMilliseconds(), key, value)
+        {
+        }
+
+        public ProducerRecord(string topic, int partition, K key, V value, Headers headers)
+            : base(topic, partition, key, value, headers.Instance)
+        {
+        }
+
         public ProducerRecord(string topic, int partition, K key, V value)
             : base(topic, partition, key, value)
         {
@@ -48,11 +71,17 @@ public ProducerRecord(string topic, V value)
 
         public string Topic => IExecute<string>("topic");
 
+        public int Partition => IExecute<int>("partition");
+
         public K Key => IExecute<K>("key");
 
         public V Value => IExecute<V>("value");
 
         public long Timestamp => IExecute<long>("timestamp");
+
+        public DateTime DateTime => DateTimeOffset.FromUnixTimeMilliseconds(Timestamp).DateTime;
+
+        public Headers Headers => New<Headers>("headers");
     }
 
     public class ProducerRecord : ProducerRecord<object, object>
@@ -61,11 +90,31 @@ public ProducerRecord()
         {
         }
 
+        public ProducerRecord(string topic, int partition, long timestamp, object key, object value, Headers headers)
+            : base(topic, partition, timestamp, key, value, headers)
+        {
+        }
+
+        public ProducerRecord(string topic, int partition, DateTime timestamp, object key, object value, Headers headers)
+            : base(topic, partition, timestamp, key, value, headers)
+        {
+        }
+
         public ProducerRecord(string topic, int partition, long timestamp, object key, object value)
             : base(topic, partition, timestamp, key, value)
         {
         }
 
+        public ProducerRecord(string topic, int partition, DateTime timestamp, object key, object value)
+            : base(topic, partition, timestamp, key, value)
+        {
+        }
+
+        public ProducerRecord(string topic, int partition, object key, object value, Headers headers)
+            : base(topic, partition, key, value, headers)
+        {
+        }
+
         public ProducerRecord(string topic, int partition, object key, object value)
             : base(topic, partition, key, value)
         {
diff --git a/src/KafkaBridge/BridgedClasses/Clients/Producer/RecordMetadata.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/RecordMetadata.cs
similarity index 92%
rename from src/KafkaBridge/BridgedClasses/Clients/Producer/RecordMetadata.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/RecordMetadata.cs
index e5a09d67b0..5793a0fc40 100644
--- a/src/KafkaBridge/BridgedClasses/Clients/Producer/RecordMetadata.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Clients/Producer/RecordMetadata.cs
@@ -16,6 +16,8 @@
 * Refer to LICENSE for more information.
 */
 
+using System;
+
 namespace MASES.KafkaBridge.Clients.Producer
 {
     public class RecordMetadata : JCOBridge.C2JBridge.JVMBridgeBase<RecordMetadata>
@@ -30,6 +32,8 @@ public class RecordMetadata : JCOBridge.C2JBridge.JVMBridgeBase<RecordMetadata>
 
         public long Timestamp => IExecute<long>("timestamp");
 
+        public DateTime DateTime => DateTimeOffset.FromUnixTimeMilliseconds(Timestamp).DateTime;
+
         public int SerializedKeySize => IExecute<int>("serializedKeySize");
 
         public int SerializedValueSize => IExecute<int>("serializedValueSize");
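The new DateTime-based constructors and the DateTime/Headers accessors above remove the need to juggle Unix-epoch milliseconds by hand. The sketch below shows the intended call pattern; it is not part of the patch, and the KafkaProducer constructor overload and Send method are assumed to bridge the Java producer one-to-one.

    // Hypothetical usage sketch (not part of this patch).
    using MASES.KafkaBridge.Clients.Producer;
    using MASES.KafkaBridge.Java.Util;

    var props = new Properties(); // fill with ProducerConfig.* entries; Put assumed to mirror java.util.Properties
    var record = new ProducerRecord("myTopic", 0, System.DateTime.UtcNow, "myKey", "myValue");
    System.Console.WriteLine($"{record.Topic}[{record.Partition}] @ {record.DateTime}"); // DateTime wraps the epoch Timestamp

    using (var producer = new KafkaProducer(props)) // Properties-based constructor assumed
    {
        producer.Send(record); // assumed bridge of send(ProducerRecord)
    }

diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Config/TopicConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Config/TopicConfig.cs
new file mode 100644
index 0000000000..343cda8d5b
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Config/TopicConfig.cs
@@ -0,0 +1,86 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.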
+*/ + +using System; + +namespace MASES.KafkaBridge.Common.Config +{ + public class TopicConfig : JCOBridge.C2JBridge.JVMBridgeBase + { + public override bool IsStatic => true; + public override string ClassName => "org.apache.kafka.common.config.TopicConfig"; + + public static readonly string SEGMENT_BYTES_CONFIG = Clazz.GetField("SEGMENT_BYTES_CONFIG"); + + public static readonly string SEGMENT_MS_CONFIG = Clazz.GetField("SEGMENT_MS_CONFIG"); + + public static readonly string SEGMENT_JITTER_MS_CONFIG = Clazz.GetField("SEGMENT_JITTER_MS_CONFIG"); + + public static readonly string SEGMENT_INDEX_BYTES_CONFIG = Clazz.GetField("SEGMENT_INDEX_BYTES_CONFIG"); + + public static readonly string FLUSH_MESSAGES_INTERVAL_CONFIG = Clazz.GetField("FLUSH_MESSAGES_INTERVAL_CONFIG"); + + public static readonly string FLUSH_MS_CONFIG = Clazz.GetField("FLUSH_MS_CONFIG"); + + public static readonly string RETENTION_BYTES_CONFIG = Clazz.GetField("RETENTION_BYTES_CONFIG"); + + public static readonly string RETENTION_MS_CONFIG = Clazz.GetField("RETENTION_MS_CONFIG"); + + public static readonly string REMOTE_LOG_STORAGE_ENABLE_CONFIG = Clazz.GetField("REMOTE_LOG_STORAGE_ENABLE_CONFIG"); + + public static readonly string LOCAL_LOG_RETENTION_MS_CONFIG = Clazz.GetField("LOCAL_LOG_RETENTION_MS_CONFIG"); + + public static readonly string LOCAL_LOG_RETENTION_BYTES_CONFIG = Clazz.GetField("LOCAL_LOG_RETENTION_BYTES_CONFIG"); + + public static readonly string MAX_MESSAGE_BYTES_CONFIG = Clazz.GetField("MAX_MESSAGE_BYTES_CONFIG"); + + public static readonly string INDEX_INTERVAL_BYTES_CONFIG = Clazz.GetField("INDEX_INTERVAL_BYTES_CONFIG"); + + public static readonly string FILE_DELETE_DELAY_MS_CONFIG = Clazz.GetField("FILE_DELETE_DELAY_MS_CONFIG"); + + public static readonly string DELETE_RETENTION_MS_CONFIG = Clazz.GetField("DELETE_RETENTION_MS_CONFIG"); + + public static readonly string MIN_COMPACTION_LAG_MS_CONFIG = Clazz.GetField("MIN_COMPACTION_LAG_MS_CONFIG"); + + public static readonly string MAX_COMPACTION_LAG_MS_CONFIG = Clazz.GetField("MAX_COMPACTION_LAG_MS_CONFIG"); + + public static readonly string MIN_CLEANABLE_DIRTY_RATIO_CONFIG = Clazz.GetField("MIN_CLEANABLE_DIRTY_RATIO_CONFIG"); + + public static readonly string CLEANUP_POLICY_CONFIG = Clazz.GetField("CLEANUP_POLICY_CONFIG"); + public static readonly string CLEANUP_POLICY_COMPACT = Clazz.GetField("CLEANUP_POLICY_COMPACT"); + public static readonly string CLEANUP_POLICY_DELETE = Clazz.GetField("CLEANUP_POLICY_DELETE"); + + public static readonly string UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG = Clazz.GetField("UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG"); + + public static readonly string MIN_IN_SYNC_REPLICAS_CONFIG = Clazz.GetField("MIN_IN_SYNC_REPLICAS_CONFIG"); + + public static readonly string COMPRESSION_TYPE_CONFIG = Clazz.GetField("COMPRESSION_TYPE_CONFIG"); + + public static readonly string PREALLOCATE_CONFIG = Clazz.GetField("PREALLOCATE_CONFIG"); + + [Obsolete()] + public static readonly string MESSAGE_FORMAT_VERSION_CONFIG = Clazz.GetField("MESSAGE_FORMAT_VERSION_CONFIG"); + + public static readonly string MESSAGE_TIMESTAMP_TYPE_CONFIG = Clazz.GetField("MESSAGE_TIMESTAMP_TYPE_CONFIG"); + + public static readonly string MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG = Clazz.GetField("MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG"); + + public static readonly string MESSAGE_DOWNCONVERSION_ENABLE_CONFIG = Clazz.GetField("MESSAGE_DOWNCONVERSION_ENABLE_CONFIG"); + + } +} diff --git a/src/KafkaBridge/BridgedClasses/Common/ElectionType.cs 
b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/ElectionType.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Common/ElectionType.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/ElectionType.cs
diff --git a/src/KafkaBridge/BridgedClasses/Common/Header/Header.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Header/Header.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Common/Header/Header.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Header/Header.cs
diff --git a/src/KafkaBridge/BridgedClasses/Common/Header/Headers.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Header/Headers.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Common/Header/Headers.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Header/Headers.cs
diff --git a/src/KafkaBridge/BridgedClasses/Common/KafkaFuture.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/KafkaFuture.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Common/KafkaFuture.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/KafkaFuture.cs
diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Map.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Record/TimestampType.cs
similarity index 78%
rename from src/KafkaBridge/BridgedClasses/Java/Util/Map.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Record/TimestampType.cs
index 5f895f2bec..8ce5df0188 100644
--- a/src/KafkaBridge/BridgedClasses/Java/Util/Map.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Record/TimestampType.cs
@@ -16,10 +16,12 @@
 * Refer to LICENSE for more information.
 */
 
-namespace MASES.KafkaBridge.Java.Util
+namespace MASES.KafkaBridge.Common.Record
 {
-    public class Map<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<Map<K, V>>
+    public enum TimestampType : int
     {
-        public override string ClassName => "java.util.Map";
+        NO_TIMESTAMP_TYPE = -1,
+        CREATE_TIME = 0,
+        LOG_APPEND_TIME = 1,
     }
-}
\ No newline at end of file
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Serialization/Serdes.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Serialization/Serdes.cs
new file mode 100644
index 0000000000..983bd3e30c
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Serialization/Serdes.cs
@@ -0,0 +1,69 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/ + +namespace MASES.KafkaBridge.Common.Serialization +{ + public class Serdes : JCOBridge.C2JBridge.JVMBridgeBase + { + public override string ClassName => "org.apache.kafka.common.serialization.Serdes"; + + public static readonly dynamic VoidSerde = DynClazz.VoidSerde; + + public static readonly dynamic LongSerde = DynClazz.LongSerde; + + public static readonly dynamic IntegerSerde = DynClazz.IntegerSerde; + + public static readonly dynamic ShortSerde = DynClazz.ShortSerde; + + public static readonly dynamic FloatSerde = DynClazz.FloatSerde; + + public static readonly dynamic DoubleSerde = DynClazz.DoubleSerde; + + public static readonly dynamic StringSerde = DynClazz.StringSerde; + + public static readonly dynamic ByteBufferSerde = DynClazz.ByteBufferSerde; + + public static readonly dynamic BytesSerde = DynClazz.BytesSerde; + + public static readonly dynamic ByteArraySerde = DynClazz.ByteArraySerde; + + public static readonly dynamic UUIDSerde = DynClazz.UUIDSerde; + + public static dynamic Long => DynClazz.Long(); + + public static dynamic Integer => DynClazz.Integer(); + + public static dynamic Short => DynClazz.Short(); + + public static dynamic Float => DynClazz.Float(); + + public static dynamic Double => DynClazz.Double(); + + public static dynamic String => DynClazz.String(); + + public static dynamic ByteBuffer => DynClazz.ByteBuffer(); + + public static dynamic Bytes => DynClazz.Bytes(); + + public static dynamic UUID => DynClazz.UUID(); + + public static dynamic ByteArray => DynClazz.ByteArray(); + + public static dynamic Void => DynClazz.Void(); + } +} diff --git a/src/KafkaBridge/BridgedClasses/Common/TopicPartition.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/TopicPartition.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Common/TopicPartition.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/TopicPartition.cs diff --git a/src/KafkaBridge/BridgedClasses/Common/Serialization/Serdes.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Uuid.cs similarity index 82% rename from src/KafkaBridge/BridgedClasses/Common/Serialization/Serdes.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Uuid.cs index 09585f14c6..b0b514a1bc 100644 --- a/src/KafkaBridge/BridgedClasses/Common/Serialization/Serdes.cs +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Common/Uuid.cs @@ -16,10 +16,10 @@ * Refer to LICENSE for more information. */ -namespace MASES.KafkaBridge.Common.Serialization +namespace MASES.KafkaBridge.Common { - public class Serdes : JCOBridge.C2JBridge.JVMBridgeBase + public class Uuid : JCOBridge.C2JBridge.JVMBridgeBase { - public override string ClassName => "org.apache.kafka.common.serialization.Serdes"; + public override string ClassName => "org.apache.kafka.common.Uuid"; } -} +} \ No newline at end of file diff --git a/src/KafkaBridge/BridgedClasses/Streams/StreamsConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Lang/UUID.cs similarity index 77% rename from src/KafkaBridge/BridgedClasses/Streams/StreamsConfig.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Lang/UUID.cs index 993c39e0a7..502715b15c 100644 --- a/src/KafkaBridge/BridgedClasses/Streams/StreamsConfig.cs +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Lang/UUID.cs @@ -16,11 +16,12 @@ * Refer to LICENSE for more information. 
*/ -namespace MASES.KafkaBridge.Streams +namespace MASES.KafkaBridge.Java.Lang { - public class StreamsConfig : JCOBridge.C2JBridge.JVMBridgeBase + public sealed class UUID : JCOBridge.C2JBridge.JVMBridgeBase { public override bool IsStatic => true; - public override string ClassName => "org.apache.kafka.streams.StreamsConfig"; + + public override string ClassName => "java.lang.UUID"; } } diff --git a/src/KafkaBridge/BridgedClasses/Java/Lang/Void.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Lang/Void.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Lang/Void.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Lang/Void.cs diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Collection.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Collection.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Util/Collection.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Collection.cs diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Collections.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Collections.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Util/Collections.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Collections.cs diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Concurrent/Future.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Concurrent/Future.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Util/Concurrent/Future.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Concurrent/Future.cs diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Concurrent/TimeUnit.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Concurrent/TimeUnit.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Util/Concurrent/TimeUnit.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Concurrent/TimeUnit.cs diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/List.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/List.cs similarity index 100% rename from src/KafkaBridge/BridgedClasses/Java/Util/List.cs rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/List.cs diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Map.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Map.cs new file mode 100644 index 0000000000..c2843cd60c --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Map.cs @@ -0,0 +1,43 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. 
+*/
+
+namespace MASES.KafkaBridge.Java.Util
+{
+    public class Map<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<Map<K, V>>
+    {
+        public override string ClassName => "java.util.Map";
+
+        public virtual V Get(K key) { return IExecute<V>("get", key); }
+
+        public virtual V Put(K key, V value)
+        {
+            object val = value;
+            if (typeof(JCOBridge.C2JBridge.JVMBridgeBase).IsAssignableFrom(typeof(V)))
+            {
+                val = (value as JCOBridge.C2JBridge.JVMBridgeBase).Instance;
+            }
+
+            return IExecute<V>("put", key, val);
+        }
+    }
+
+    public class Map2<K, V> : Map<K, V> where V : JCOBridge.C2JBridge.JVMBridgeBase, new()
+    {
+        public override V Get(K key) { return New<V>("get", key); }
+    }
+}
\ No newline at end of file
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Optional.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Optional.cs
new file mode 100644
index 0000000000..765dfb2e0c
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Optional.cs
@@ -0,0 +1,32 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+namespace MASES.KafkaBridge.Java.Util
+{
+    public class Optional<T> : JCOBridge.C2JBridge.JVMBridgeBase<Optional<T>>
+    {
+        public override string ClassName => "java.util.Optional";
+
+        public static Optional<T> Empty => SExecute<Optional<T>>("empty");
+
+        public bool IsPresent => IExecute<bool>("isPresent");
+
+        public virtual T Get() { return IExecute<T>("get"); }
+    }
+}
+
diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Properties.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Properties.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Java/Util/Properties.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Properties.cs
diff --git a/src/KafkaBridge/BridgedClasses/Java/Util/Set.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Set.cs
similarity index 100%
rename from src/KafkaBridge/BridgedClasses/Java/Util/Set.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Java/Util/Set.cs
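Map, Map2 and Optional are thin typed views over their java.util counterparts: Get/Put marshal keys and values across the bridge, and Map2 is the variant to use when the value type is itself a bridged class. A short sketch of the intended consumption pattern, not part of the patch (the Map and Optional instances are assumed to come from other bridged API calls):

    // Hypothetical usage sketch (not part of this patch).
    using MASES.KafkaBridge.Java.Util;

    static void Touch(Map<string, string> configs) // a bridged java.util.Map obtained elsewhere
    {
        string policy = configs.Get("cleanup.policy"); // bridges map.get(key)
        configs.Put("cleanup.policy", "compact");      // Put unwraps JVMBridgeBase values before calling put()
        System.Console.WriteLine(policy);
    }

    static void Inspect(Optional<string> maybe) // e.g. the result of KafkaStreams.AddStreamThread() below
    {
        if (maybe.IsPresent) System.Console.WriteLine(maybe.Get()); // guard the bridged get()
    }

diff --git a/src/KafkaBridge/BridgedClasses/Streams/KafkaStreams.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/GlobalKTable.cs
similarity index 74%
rename from src/KafkaBridge/BridgedClasses/Streams/KafkaStreams.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/GlobalKTable.cs
index 7c401d0fd3..0ed8ef9f51 100644
--- a/src/KafkaBridge/BridgedClasses/Streams/KafkaStreams.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/GlobalKTable.cs
@@ -16,11 +16,12 @@
 * Refer to LICENSE for more information.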
 */
 
-namespace MASES.KafkaBridge.Streams
+namespace MASES.KafkaBridge.Streams.KStream
 {
-    public class KafkaStreams : JCOBridge.C2JBridge.JVMBridgeBase<KafkaStreams>
+    public class GlobalKTable<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<GlobalKTable<K, V>>
     {
-        public override string ClassName => "org.apache.kafka.streams.KafkaStreams";
+        public override string ClassName => "org.apache.kafka.streams.kstream.GlobalKTable";
+
+        public string QueryableStoreName => IExecute<string>("queryableStoreName");
     }
 }
-
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KStream.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KStream.cs
new file mode 100644
index 0000000000..e17f6c1308
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KStream.cs
@@ -0,0 +1,33 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    public class KStream<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<KStream<K, V>>
+    {
+        public override string ClassName => "org.apache.kafka.streams.kstream.KStream";
+
+        public KStream<K, V> Merge(KStream<K, V> stream) { return New<KStream<K, V>>("merge", stream.Instance); }
+
+        public KStream<K, V> Repartition() { return New<KStream<K, V>>("repartition"); }
+
+        public void To(string topic) { IExecute("to", topic); }
+
+        public KTable<K, V> ToTable() { return New<KTable<K, V>>("toTable"); }
+    }
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KTable.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KTable.cs
new file mode 100644
index 0000000000..3c7c72da3c
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KTable.cs
@@ -0,0 +1,63 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    public class KTable<K, V> : JCOBridge.C2JBridge.JVMBridgeBase<KTable<K, V>>
+    {
+        public override string ClassName => "org.apache.kafka.streams.kstream.KTable";
+
+        public KTable<K, V> Filter<T, J>(Predicate<T, J> predicate)
+            where T : K
+            where J : V
+        {
+            return New<KTable<K, V>>("filter", predicate.Listener);
+        }
+
+        public KTable<K, V> Filter<T, J>(Predicate<T, J> predicate, Named named)
+            where T : K
+            where J : V
+        {
+            return New<KTable<K, V>>("filter", predicate.Listener, named.Instance);
+        }
+
+        public KTable<K, V> FilterNot<T, J>(Predicate<T, J> predicate)
+            where T : K
+            where J : V
+        {
+            return New<KTable<K, V>>("filterNot", predicate.Listener);
+        }
+
+        public KTable<K, V> FilterNot<T, J>(Predicate<T, J> predicate, Named named)
+            where T : K
+            where J : V
+        {
+            return New<KTable<K, V>>("filterNot", predicate.Listener, named.Instance);
+        }
+
+        public KStream<K, V> ToStream()
+        {
+            return New<KStream<K, V>>("toStream");
+        }
+
+        public KStream<K, V> ToStream(Named named)
+        {
+            return New<KStream<K, V>>("toStream", named.Instance);
+        }
+    }
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KeyValueMapper.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KeyValueMapper.cs
new file mode 100644
index 0000000000..8cfb0456c1
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/KeyValueMapper.cs
@@ -0,0 +1,91 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+using MASES.JCOBridge.C2JBridge;
+using System;
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    /// <summary>
+    /// Listener for Kafka KeyValueMapper. Extends <see cref="CLRListener"/>
+    /// </summary>
+    /// <typeparam name="T">The data associated to the event</typeparam>
+    /// <typeparam name="U">The data associated to the event</typeparam>
+    public class KeyValueMapper<T, U, VR> : CLRListener
+    {
+        /// <inheritdoc/>
+        public sealed override string JniClass => "org.mases.kafkabridge.streams.kstream.KeyValueMapperImpl";
+
+        readonly Func<T, U, VR> executionFunction = null;
+        /// <summary>
+        /// The <see cref="Func{T, U, VR}"/> to be executed
+        /// </summary>
+        public virtual Func<T, U, VR> Execute { get { return executionFunction; } }
+        /// <summary>
+        /// Initialize a new instance of <see cref="KeyValueMapper{T, U, VR}"/>
+        /// </summary>
+        /// <param name="func">The <see cref="Func{T, U, VR}"/> to be executed</param>
+        public KeyValueMapper(Func<T, U, VR> func = null)
+        {
+            if (func != null) executionFunction = func;
+            else executionFunction = Apply;
+
+            AddEventHandler("apply", new EventHandler<CLRListenerEventArgs<CLREventData<T>>>(EventHandler));
+        }
+
+        void EventHandler(object sender, CLRListenerEventArgs<CLREventData<T>> data)
+        {
+            var retVal = Execute(data.EventData.TypedEventData, data.EventData.To<U>(0));
+            data.CLRReturnValue = retVal;
+        }
+        /// <summary>
+        /// Executes the KeyValueMapper action in the CLR
+        /// </summary>
+        /// <param name="o1">The KeyValueMapper object</param>
+        /// <param name="o2">The KeyValueMapper object</param>
+        /// <returns>The apply evaluation</returns>
+        public virtual VR Apply(T o1, U o2) { return default(VR); }
+    }
+
+    /// <summary>
+    /// Listener for Kafka KeyValueMapper. Extends <see cref="KeyValueMapper{T, U, VR}"/>
+    /// </summary>
+    /// <typeparam name="T">The data associated to the event as an object</typeparam>
+    /// <typeparam name="U">The data associated to the event as an object</typeparam>
+    public class JVMBridgeKeyValueMapper<T, U, VR> : KeyValueMapper<T, U, VR>
+        where T : JVMBridgeBase, new()
+        where U : JVMBridgeBase, new()
+        where VR : JVMBridgeBase, new()
+    {
+        /// <summary>
+        /// Initialize a new instance of <see cref="JVMBridgeKeyValueMapper{T, U, VR}"/>
+        /// </summary>
+        /// <param name="func">The <see cref="Func{T, U, VR}"/> to be executed</param>
+        public JVMBridgeKeyValueMapper(Func<T, U, VR> func = null) : base(func)
+        {
+            AddEventHandler("apply", new EventHandler<CLRListenerEventArgs<JVMBridgeEventData<T>>>(EventHandler));
+        }
+
+        void EventHandler(object sender, CLRListenerEventArgs<JVMBridgeEventData<T>> data)
+        {
+            var retVal = Execute(data.EventData.TypedEventData, data.EventData.To<U>(0));
+            data.CLRReturnValue = retVal;
+        }
+    }
+
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Materialized.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Materialized.cs
new file mode 100644
index 0000000000..5e74029097
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Materialized.cs
@@ -0,0 +1,34 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+using MASES.KafkaBridge.Streams.Processor;
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    public class Materialized<K, V, S> : JCOBridge.C2JBridge.JVMBridgeBase<Materialized<K, V, S>>
+        where S : StateStore
+    {
+        public override string ClassName => "org.apache.kafka.streams.kstream.Materialized";
+
+        public static Materialized<K, V, S> As(string storeName)
+        {
+            return SExecute<Materialized<K, V, S>>("as", storeName);
+        }
+
+    }
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Named.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Named.cs
new file mode 100644
index 0000000000..c5d0d05201
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Named.cs
@@ -0,0 +1,35 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    public class Named : JCOBridge.C2JBridge.JVMBridgeBase<Named>
+    {
+        public override string ClassName => "org.apache.kafka.streams.kstream.Named";
+
+        public static Named As(string name)
+        {
+            return SExecute<Named>("as", name);
+        }
+
+        public Named WithName(string name)
+        {
+            return New<Named>("withName", name);
+        }
+    }
+}
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Predicate.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Predicate.cs
new file mode 100644
index 0000000000..ac7ed9e6ef
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KStream/Predicate.cs
@@ -0,0 +1,90 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+using MASES.JCOBridge.C2JBridge;
+using System;
+
+namespace MASES.KafkaBridge.Streams.KStream
+{
+    /// <summary>
+    /// Listener for Kafka Predicate. Extends <see cref="CLRListener"/>
+    /// </summary>
+    /// <typeparam name="T">The data associated to the event</typeparam>
+    /// <typeparam name="U">The data associated to the event</typeparam>
+    public class Predicate<T, U> : CLRListener
+    {
+        /// <inheritdoc/>
+        public sealed override string JniClass => "org.mases.kafkabridge.streams.kstream.PredicateImpl";
+
+        readonly Func<T, U, bool> executionFunction = null;
+        /// <summary>
+        /// The <see cref="Func{T, U, Boolean}"/> to be executed
+        /// </summary>
+        public virtual Func<T, U, bool> Execute { get { return executionFunction; } }
+        /// <summary>
+        /// Initialize a new instance of <see cref="Predicate{T, U}"/>
+        /// </summary>
+        /// <param name="func">The <see cref="Func{T, U, Boolean}"/> to be executed</param>
+        public Predicate(Func<T, U, bool> func = null)
+        {
+            if (func != null) executionFunction = func;
+            else executionFunction = Test;
+
+            AddEventHandler("test", new EventHandler<CLRListenerEventArgs<CLREventData<T>>>(EventHandler));
+        }
+
+        void EventHandler(object sender, CLRListenerEventArgs<CLREventData<T>> data)
+        {
+            var retVal = Execute(data.EventData.TypedEventData, data.EventData.To<U>(0));
+            data.CLRReturnValue = retVal;
+        }
+        /// <summary>
+        /// Executes the Predicate action in the CLR
+        /// </summary>
+        /// <param name="o1">The Predicate object</param>
+        /// <param name="o2">The Predicate object</param>
+        /// <returns>The test evaluation</returns>
+        public virtual bool Test(T o1, U o2) { return false; }
+    }
+
+    /// <summary>
+    /// Listener for Kafka Predicate. Extends <see cref="Predicate{T, U}"/>
+    /// </summary>
+    /// <typeparam name="T">The data associated to the event as an object</typeparam>
+    /// <typeparam name="U">The data associated to the event as an object</typeparam>
+    public class JVMBridgePredicate<T, U> : Predicate<T, U>
+        where T : JVMBridgeBase, new()
+        where U : JVMBridgeBase, new()
+    {
+        /// <summary>
+        /// Initialize a new instance of <see cref="JVMBridgePredicate{T, U}"/>
+        /// </summary>
+        /// <param name="func">The <see cref="Func{T, U, Boolean}"/> to be executed</param>
+        public JVMBridgePredicate(Func<T, U, bool> func = null) : base(func)
+        {
+            AddEventHandler("test", new EventHandler<CLRListenerEventArgs<JVMBridgeEventData<T>>>(EventHandler));
+        }
+
+        void EventHandler(object sender, CLRListenerEventArgs<JVMBridgeEventData<T>> data)
+        {
+            var retVal = Execute(data.EventData.TypedEventData, data.EventData.To<U>(0));
+            data.CLRReturnValue = retVal;
+        }
+    }
+
+}
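The Predicate listener is what lets a CLR lambda run inside a JVM-side filter. A sketch of the round trip using only classes added in this patch (topic and store names are arbitrary; it is not itself part of the patch):

    // Hypothetical usage sketch (not part of this patch).
    using MASES.KafkaBridge.Streams;
    using MASES.KafkaBridge.Streams.KStream;

    var builder = new StreamsBuilder();
    var table = builder.Table<string, string>("myTopic");

    // The Func is marshalled to the JVM through the PredicateImpl listener class.
    var predicate = new Predicate<string, string>((key, value) => value != null && value.Length > 0);

    var filtered = table.Filter(predicate, Named.As("non-empty-values"));
    filtered.ToStream().To("filteredTopic"); // KStream.To bridges to(topic)

diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KafkaStreams.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KafkaStreams.cs
new file mode 100644
index 0000000000..ddc3aecf23
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/KafkaStreams.cs
@@ -0,0 +1,55 @@
+/*
+* Copyright 2021 MASES s.r.l.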
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+using MASES.JCOBridge.C2JBridge.JVMInterop;
+using MASES.KafkaBridge.Java.Util;
+using System;
+
+namespace MASES.KafkaBridge.Streams
+{
+    public class KafkaStreams : JCOBridge.C2JBridge.JVMBridgeBase<KafkaStreams>
+    {
+        public override bool IsCloseable => true;
+
+        public override string ClassName => "org.apache.kafka.streams.KafkaStreams";
+
+        public enum StateType
+        {
+            CREATED, // 0
+            REBALANCING, // 1
+            RUNNING, // 2
+            PENDING_SHUTDOWN, // 3
+            NOT_RUNNING, // 4
+            PENDING_ERROR, // 5
+            ERROR, // 6
+        }
+        [Obsolete("This is not public in Apache Kafka API")]
+        public KafkaStreams() { }
+
+        public KafkaStreams(Topology topology, Properties props) : base(topology.Instance, props.Instance) { }
+
+        public StateType State => (StateType)Enum.Parse(typeof(StateType), IExecute<IJavaObject>("state").Invoke<string>("name"));
+
+        public Optional<string> AddStreamThread() { return New<Optional<string>>("addStreamThread"); }
+
+        public Optional<string> RemoveStreamThread() { return New<Optional<string>>("removeStreamThread"); }
+
+        public void Start() { IExecute("start"); }
+    }
+}
+
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Processor/StateStore.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Processor/StateStore.cs
new file mode 100644
index 0000000000..7cb4455278
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Processor/StateStore.cs
@@ -0,0 +1,35 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+namespace MASES.KafkaBridge.Streams.Processor
+{
+    public class StateStore : JCOBridge.C2JBridge.JVMBridgeBase<StateStore>
+    {
+        public override string ClassName => "org.apache.kafka.streams.processor.StateStore";
+
+        public string Name => IExecute<string>("name");
+
+        public void Flush() { IExecute("flush"); }
+
+        public void Close() { IExecute("close"); }
+
+        public bool Persistent => IExecute<bool>("persistent");
+
+        public bool IsOpen => IExecute<bool>("isOpen");
+    }
+}
diff --git a/src/KafkaBridge/BridgedClasses/Streams/StreamsBuilder.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/State/WindowStore.cs
similarity index 69%
rename from src/KafkaBridge/BridgedClasses/Streams/StreamsBuilder.cs
rename to src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/State/WindowStore.cs
index ed15aa0923..7f8428d252 100644
--- a/src/KafkaBridge/BridgedClasses/Streams/StreamsBuilder.cs
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/State/WindowStore.cs
@@ -16,10 +16,20 @@
 * Refer to LICENSE for more information.
 */
 
-namespace MASES.KafkaBridge.Streams
+using MASES.KafkaBridge.Streams.Processor;
+
+namespace MASES.KafkaBridge.Streams.State
 {
-    public class StreamsBuilder : JCOBridge.C2JBridge.JVMBridgeBase<StreamsBuilder>
+    public class WindowStore<K, V> : StateStore
     {
-        public override string ClassName => "org.apache.kafka.streams.StreamsBuilder";
+        public override string ClassName => "org.apache.kafka.streams.state.WindowStore";
+
+        public void Put(K key, V value, long windowStartTimestamp)
+        {
+            IExecute("put", key, value, windowStartTimestamp);
+        }
+
     }
 }
+
diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsBuilder.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsBuilder.cs
new file mode 100644
index 0000000000..69fc2195e4
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsBuilder.cs
@@ -0,0 +1,44 @@
+/*
+* Copyright 2021 MASES s.r.l.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+* Refer to LICENSE for more information.
+*/
+
+using MASES.KafkaBridge.Java.Util;
+using MASES.KafkaBridge.Streams.KStream;
+
+namespace MASES.KafkaBridge.Streams
+{
+    public class StreamsBuilder : JCOBridge.C2JBridge.JVMBridgeBase<StreamsBuilder>
+    {
+        public override string ClassName => "org.apache.kafka.streams.StreamsBuilder";
+
+        public KStream<K, V> Stream<K, V>(string topic) { return New<KStream<K, V>>("stream", topic); }
+
+        public KTable<K, V> Table<K, V>(string topic) { return New<KTable<K, V>>("table", topic); }
+
+        public GlobalKTable<K, V> GlobalTable<K, V>(string topic) { return New<GlobalKTable<K, V>>("globalTable", topic); }
+
+        public Topology Build()
+        {
+            return New<Topology>("build");
+        }
+
+        public Topology Build(Properties props)
+        {
+            return New<Topology>("build", props.Instance);
+        }
+    }
+}
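StreamsBuilder closes the loop between the KStream/KTable wrappers above and the KafkaStreams runner. A minimal build-and-run sketch, not part of the patch (the Properties.Put wrapper is assumed to mirror java.util.Properties; configuration keys come from the StreamsConfig class added just below):

    // Hypothetical usage sketch (not part of this patch).
    using MASES.KafkaBridge.Java.Util;
    using MASES.KafkaBridge.Streams;

    var props = new Properties();
    props.Put(StreamsConfig.APPLICATION_ID_CONFIG, "bridge-sample");   // assumed Put wrapper
    props.Put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    var builder = new StreamsBuilder();
    builder.Stream<string, string>("inputTopic").To("outputTopic");    // trivial copy topology

    var streams = new KafkaStreams(builder.Build(), props);
    streams.Start();                                                   // bridges KafkaStreams.start()
    System.Console.WriteLine(streams.State);                           // RUNNING once rebalancing completes

diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsConfig.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsConfig.cs
new file mode 100644
index 0000000000..48b8d6187c
--- /dev/null
+++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/StreamsConfig.cs
@@ -0,0 +1,177 @@
+/*
+* Copyright 2021 MASES s.r.l.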
+* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +using System; + +namespace MASES.KafkaBridge.Streams +{ + public class StreamsConfig : JCOBridge.C2JBridge.JVMBridgeBase + { + public override bool IsStatic => true; + public override string ClassName => "org.apache.kafka.streams.StreamsConfig"; + + public static readonly string TOPIC_PREFIX = Clazz.GetField("TOPIC_PREFIX"); + + public static readonly string CONSUMER_PREFIX = Clazz.GetField("CONSUMER_PREFIX"); + + public static readonly string MAIN_CONSUMER_PREFIX = Clazz.GetField("MAIN_CONSUMER_PREFIX"); + + public static readonly string RESTORE_CONSUMER_PREFIX = Clazz.GetField("RESTORE_CONSUMER_PREFIX"); + + public static readonly string GLOBAL_CONSUMER_PREFIX = Clazz.GetField("GLOBAL_CONSUMER_PREFIX"); + + public static readonly string PRODUCER_PREFIX = Clazz.GetField("PRODUCER_PREFIX"); + + public static readonly string ADMIN_CLIENT_PREFIX = Clazz.GetField("ADMIN_CLIENT_PREFIX"); + + public static readonly string NO_OPTIMIZATION = Clazz.GetField("NO_OPTIMIZATION"); + + public static readonly string OPTIMIZE = Clazz.GetField("OPTIMIZE"); + + public static readonly string UPGRADE_FROM_0100 = Clazz.GetField("UPGRADE_FROM_0100"); + + public static readonly string UPGRADE_FROM_0101 = Clazz.GetField("UPGRADE_FROM_0101"); + + public static readonly string UPGRADE_FROM_0102 = Clazz.GetField("UPGRADE_FROM_0102"); + + public static readonly string UPGRADE_FROM_0110 = Clazz.GetField("UPGRADE_FROM_0110"); + + public static readonly string UPGRADE_FROM_10 = Clazz.GetField("UPGRADE_FROM_10"); + + public static readonly string UPGRADE_FROM_11 = Clazz.GetField("UPGRADE_FROM_11"); + + public static readonly string UPGRADE_FROM_20 = Clazz.GetField("UPGRADE_FROM_20"); + + public static readonly string UPGRADE_FROM_21 = Clazz.GetField("UPGRADE_FROM_21"); + + public static readonly string UPGRADE_FROM_22 = Clazz.GetField("UPGRADE_FROM_22"); + + public static readonly string UPGRADE_FROM_23 = Clazz.GetField("UPGRADE_FROM_23"); + + public static readonly string AT_LEAST_ONCE = Clazz.GetField("AT_LEAST_ONCE"); + + [Obsolete] + public static readonly string EXACTLY_ONCE = Clazz.GetField("EXACTLY_ONCE"); + + [Obsolete] + public static readonly string EXACTLY_ONCE_BETA = Clazz.GetField("EXACTLY_ONCE_BETA"); + + public static readonly string EXACTLY_ONCE_V2 = Clazz.GetField("EXACTLY_ONCE_V2"); + + public static readonly string METRICS_LATEST = Clazz.GetField("METRICS_LATEST"); + + public static readonly string ACCEPTABLE_RECOVERY_LAG_CONFIG = Clazz.GetField("ACCEPTABLE_RECOVERY_LAG_CONFIG"); + + public static readonly string APPLICATION_ID_CONFIG = Clazz.GetField("APPLICATION_ID_CONFIG"); + + public static readonly string APPLICATION_SERVER_CONFIG = Clazz.GetField("APPLICATION_SERVER_CONFIG"); + + public static readonly string BOOTSTRAP_SERVERS_CONFIG = Clazz.GetField("BOOTSTRAP_SERVERS_CONFIG"); + + public static readonly string BUFFERED_RECORDS_PER_PARTITION_CONFIG = 
Clazz.GetField("BUFFERED_RECORDS_PER_PARTITION_CONFIG"); + + public static readonly string BUILT_IN_METRICS_VERSION_CONFIG = Clazz.GetField("BUILT_IN_METRICS_VERSION_CONFIG"); + + public static readonly string CACHE_MAX_BYTES_BUFFERING_CONFIG = Clazz.GetField("CACHE_MAX_BYTES_BUFFERING_CONFIG"); + + public static readonly string CLIENT_ID_CONFIG = Clazz.GetField("CLIENT_ID_CONFIG"); + + public static readonly string COMMIT_INTERVAL_MS_CONFIG = Clazz.GetField("COMMIT_INTERVAL_MS_CONFIG"); + + public static readonly string CONNECTIONS_MAX_IDLE_MS_CONFIG = Clazz.GetField("CONNECTIONS_MAX_IDLE_MS_CONFIG"); + + public static readonly string DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG = Clazz.GetField("DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG"); + + public static readonly string DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG = Clazz.GetField("DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG"); + + [Obsolete] + public static readonly string DEFAULT_WINDOWED_KEY_SERDE_INNER_CLASS = Clazz.GetField("DEFAULT_WINDOWED_KEY_SERDE_INNER_CLASS"); + + [Obsolete] + public static readonly string DEFAULT_WINDOWED_VALUE_SERDE_INNER_CLASS = Clazz.GetField("DEFAULT_WINDOWED_VALUE_SERDE_INNER_CLASS"); + + public static readonly string WINDOWED_INNER_CLASS_SERDE = Clazz.GetField("WINDOWED_INNER_CLASS_SERDE"); + + public static readonly string DEFAULT_KEY_SERDE_CLASS_CONFIG = Clazz.GetField("DEFAULT_KEY_SERDE_CLASS_CONFIG"); + + public static readonly string DEFAULT_VALUE_SERDE_CLASS_CONFIG = Clazz.GetField("DEFAULT_VALUE_SERDE_CLASS_CONFIG"); + + public static readonly string DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG = Clazz.GetField("DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG"); + + public static readonly string MAX_TASK_IDLE_MS_CONFIG = Clazz.GetField("MAX_TASK_IDLE_MS_CONFIG"); + + public static readonly string MAX_WARMUP_REPLICAS_CONFIG = Clazz.GetField("MAX_WARMUP_REPLICAS_CONFIG"); + + public static readonly string METADATA_MAX_AGE_CONFIG = Clazz.GetField("METADATA_MAX_AGE_CONFIG"); + + public static readonly string METRICS_NUM_SAMPLES_CONFIG = Clazz.GetField("METRICS_NUM_SAMPLES_CONFIG"); + + public static readonly string METRICS_RECORDING_LEVEL_CONFIG = Clazz.GetField("METRICS_RECORDING_LEVEL_CONFIG"); + + public static readonly string METRIC_REPORTER_CLASSES_CONFIG = Clazz.GetField("METRIC_REPORTER_CLASSES_CONFIG"); + + public static readonly string METRICS_SAMPLE_WINDOW_MS_CONFIG = Clazz.GetField("METRICS_SAMPLE_WINDOW_MS_CONFIG"); + + public static readonly string NUM_STANDBY_REPLICAS_CONFIG = Clazz.GetField("NUM_STANDBY_REPLICAS_CONFIG"); + + public static readonly string NUM_STREAM_THREADS_CONFIG = Clazz.GetField("NUM_STREAM_THREADS_CONFIG"); + + public static readonly string POLL_MS_CONFIG = Clazz.GetField("POLL_MS_CONFIG"); + + public static readonly string PROBING_REBALANCE_INTERVAL_MS_CONFIG = Clazz.GetField("PROBING_REBALANCE_INTERVAL_MS_CONFIG"); + + public static readonly string PROCESSING_GUARANTEE_CONFIG = Clazz.GetField("PROCESSING_GUARANTEE_CONFIG"); + + public static readonly string RECEIVE_BUFFER_CONFIG = Clazz.GetField("RECEIVE_BUFFER_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MS_CONFIG = Clazz.GetField("RECONNECT_BACKOFF_MS_CONFIG"); + + public static readonly string RECONNECT_BACKOFF_MAX_MS_CONFIG = Clazz.GetField("RECONNECT_BACKOFF_MAX_MS_CONFIG"); + + public static readonly string REPLICATION_FACTOR_CONFIG = Clazz.GetField("REPLICATION_FACTOR_CONFIG"); + + public static readonly string REQUEST_TIMEOUT_MS_CONFIG = 
Clazz.GetField("REQUEST_TIMEOUT_MS_CONFIG"); + + public static readonly string RETRIES_CONFIG = Clazz.GetField("RETRIES_CONFIG"); + + public static readonly string RETRY_BACKOFF_MS_CONFIG = Clazz.GetField("RETRY_BACKOFF_MS_CONFIG"); + + public static readonly string ROCKSDB_CONFIG_SETTER_CLASS_CONFIG = Clazz.GetField("ROCKSDB_CONFIG_SETTER_CLASS_CONFIG"); + + public static readonly string SECURITY_PROTOCOL_CONFIG = Clazz.GetField("SECURITY_PROTOCOL_CONFIG"); + + public static readonly string SEND_BUFFER_CONFIG = Clazz.GetField("SEND_BUFFER_CONFIG"); + + public static readonly string STATE_CLEANUP_DELAY_MS_CONFIG = Clazz.GetField("STATE_CLEANUP_DELAY_MS_CONFIG"); + + public static readonly string STATE_DIR_CONFIG = Clazz.GetField("STATE_DIR_CONFIG"); + + public static readonly string TASK_TIMEOUT_MS_CONFIG = Clazz.GetField("TASK_TIMEOUT_MS_CONFIG"); + + public static readonly string TOPOLOGY_OPTIMIZATION_CONFIG = Clazz.GetField("TOPOLOGY_OPTIMIZATION_CONFIG"); + + public static readonly string WINDOW_SIZE_MS_CONFIG = Clazz.GetField("WINDOW_SIZE_MS_CONFIG"); + + public static readonly string UPGRADE_FROM_CONFIG = Clazz.GetField("UPGRADE_FROM_CONFIG"); + + public static readonly string WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG = Clazz.GetField("WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG"); + + [Obsolete] + public static readonly string TOPOLOGY_OPTIMIZATION = Clazz.GetField("TOPOLOGY_OPTIMIZATION"); + } +} diff --git a/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Topology.cs b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Topology.cs new file mode 100644 index 0000000000..d9186a9854 --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/BridgedClasses/Streams/Topology.cs @@ -0,0 +1,48 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Streams +{ + public class Topology : JCOBridge.C2JBridge.JVMBridgeBase + { + public enum AutoOffsetReset + { + EARLIEST, LATEST + } + + public override string ClassName => "org.apache.kafka.streams.Topology"; + + public Topology AddSource(string name, params string[] topics) + { + IExecute("addSource", name, topics); + return this; + } + + public Topology AddSource(AutoOffsetReset offsetReset, string name, params string[] topics) + { + IExecute("addSource", offsetReset, name, topics); + return this; + } + + public Topology AddSink(string name, string topic, params string[] parentNames) + { + IExecute("addSink", name, topic, parentNames); + return this; + } + } +} diff --git a/src/net/KafkaBridge/ClientSide/Shell/MetadataShell.cs b/src/net/KafkaBridge/ClientSide/Shell/MetadataShell.cs new file mode 100644 index 0000000000..b709f97216 --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/Shell/MetadataShell.cs @@ -0,0 +1,34 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Shell +{ + /// + /// Class managing MetadataShell + /// + public class MetadataShell : JCOBridge.C2JBridge.JVMBridgeMain + { + /// + /// Initialize a new + /// + public MetadataShell() + : base("org.apache.kafka.shell.MetadataShell") + { + } + } +} diff --git a/src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerConfig.cs b/src/net/KafkaBridge/ClientSide/Tools/ClusterTool.cs similarity index 63% rename from src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerConfig.cs rename to src/net/KafkaBridge/ClientSide/Tools/ClusterTool.cs index d2e0e1ba0b..d1f834bb3e 100644 --- a/src/KafkaBridge/BridgedClasses/Clients/Producer/ProducerConfig.cs +++ b/src/net/KafkaBridge/ClientSide/Tools/ClusterTool.cs @@ -16,11 +16,19 @@ * Refer to LICENSE for more information. */ -namespace MASES.KafkaBridge.Clients.Producer +namespace MASES.KafkaBridge.Tools { - public class ProducerConfig : JCOBridge.C2JBridge.JVMBridgeBase + /// + /// Class managing ClusterTool + /// + public class ClusterTool : JCOBridge.C2JBridge.JVMBridgeMain { - public override bool IsStatic => true; - public override string ClassName => "org.apache.kafka.clients.producer.ProducerConfig"; + /// + /// Initialize a new + /// + public ClusterTool() + : base("kafka.tools.ClusterTool") + { + } } } diff --git a/src/KafkaBridge/Tools/ConsoleConsumer.cs b/src/net/KafkaBridge/ClientSide/Tools/ConsoleConsumer.cs similarity index 100% rename from src/KafkaBridge/Tools/ConsoleConsumer.cs rename to src/net/KafkaBridge/ClientSide/Tools/ConsoleConsumer.cs diff --git a/src/KafkaBridge/Tools/ConsoleProducer.cs b/src/net/KafkaBridge/ClientSide/Tools/ConsoleProducer.cs similarity index 100% rename from src/KafkaBridge/Tools/ConsoleProducer.cs rename to src/net/KafkaBridge/ClientSide/Tools/ConsoleProducer.cs diff --git a/src/KafkaBridge/Tools/ConsumerPerformance.cs b/src/net/KafkaBridge/ClientSide/Tools/ConsumerPerformance.cs similarity index 100% rename from src/KafkaBridge/Tools/ConsumerPerformance.cs rename to src/net/KafkaBridge/ClientSide/Tools/ConsumerPerformance.cs diff --git a/src/KafkaBridge/Tools/DumpLogSegments.cs b/src/net/KafkaBridge/ClientSide/Tools/DumpLogSegments.cs similarity index 100% rename from src/KafkaBridge/Tools/DumpLogSegments.cs rename to src/net/KafkaBridge/ClientSide/Tools/DumpLogSegments.cs diff --git a/src/KafkaBridge/Tools/GetOffsetShell.cs b/src/net/KafkaBridge/ClientSide/Tools/GetOffsetShell.cs similarity index 100% rename from src/KafkaBridge/Tools/GetOffsetShell.cs rename to src/net/KafkaBridge/ClientSide/Tools/GetOffsetShell.cs diff --git a/src/KafkaBridge/Tools/MirrorMaker.cs b/src/net/KafkaBridge/ClientSide/Tools/MirrorMaker.cs similarity index 100% rename from src/KafkaBridge/Tools/MirrorMaker.cs rename to src/net/KafkaBridge/ClientSide/Tools/MirrorMaker.cs diff --git a/src/KafkaBridge/Tools/ProducerPerformance.cs b/src/net/KafkaBridge/ClientSide/Tools/ProducerPerformance.cs similarity index 100% rename from src/KafkaBridge/Tools/ProducerPerformance.cs rename to src/net/KafkaBridge/ClientSide/Tools/ProducerPerformance.cs diff 
--git a/src/KafkaBridge/Tools/ReplicaVerificationTool.cs b/src/net/KafkaBridge/ClientSide/Tools/ReplicaVerificationTool.cs similarity index 100% rename from src/KafkaBridge/Tools/ReplicaVerificationTool.cs rename to src/net/KafkaBridge/ClientSide/Tools/ReplicaVerificationTool.cs diff --git a/src/KafkaBridge/BridgedClasses/Clients/Admin/AdminClientConfig.cs b/src/net/KafkaBridge/ClientSide/Tools/StorageTool.cs similarity index 63% rename from src/KafkaBridge/BridgedClasses/Clients/Admin/AdminClientConfig.cs rename to src/net/KafkaBridge/ClientSide/Tools/StorageTool.cs index 689e17dbde..5bea998738 100644 --- a/src/KafkaBridge/BridgedClasses/Clients/Admin/AdminClientConfig.cs +++ b/src/net/KafkaBridge/ClientSide/Tools/StorageTool.cs @@ -16,11 +16,19 @@ * Refer to LICENSE for more information. */ -namespace MASES.KafkaBridge.Clients.Admin +namespace MASES.KafkaBridge.Tools { - public class AdminClientConfig : JCOBridge.C2JBridge.JVMBridgeBase + /// + /// Class managing StorageTool + /// + public class StorageTool : JCOBridge.C2JBridge.JVMBridgeMain { - public override bool IsStatic => true; - public override string ClassName => "org.apache.kafka.clients.admin.AdminClientConfig"; + /// + /// Initialize a new + /// + public StorageTool() + : base("kafka.tools.StorageTool") + { + } } } diff --git a/src/KafkaBridge/Tools/StreamsResetter.cs b/src/net/KafkaBridge/ClientSide/Tools/StreamsResetter.cs similarity index 97% rename from src/KafkaBridge/Tools/StreamsResetter.cs rename to src/net/KafkaBridge/ClientSide/Tools/StreamsResetter.cs index 1d8d8916cc..bd83bc8059 100644 --- a/src/KafkaBridge/Tools/StreamsResetter.cs +++ b/src/net/KafkaBridge/ClientSide/Tools/StreamsResetter.cs @@ -21,7 +21,7 @@ namespace MASES.KafkaBridge.Tools /// /// Class managing StreamsResetter /// - public class StreamsResetter : JCOBridge.C2JBridge.JVMBridgeMain + public class StreamsResetter : JCOBridge.C2JBridge.JVMBridgeMain { static StreamsResetter() { diff --git a/src/KafkaBridge/Tools/TransactionsCommand.cs b/src/net/KafkaBridge/ClientSide/Tools/TransactionsCommand.cs similarity index 100% rename from src/KafkaBridge/Tools/TransactionsCommand.cs rename to src/net/KafkaBridge/ClientSide/Tools/TransactionsCommand.cs diff --git a/src/net/KafkaBridge/ClientSide/Tools/VerifiableConsumer.cs b/src/net/KafkaBridge/ClientSide/Tools/VerifiableConsumer.cs new file mode 100644 index 0000000000..d83adbb0ad --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/Tools/VerifiableConsumer.cs @@ -0,0 +1,39 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. 
+*/ + +namespace MASES.KafkaBridge.Tools +{ + /// + /// Class managing VerifiableConsumer + /// + public class VerifiableConsumer : JCOBridge.C2JBridge.JVMBridgeMain + { + static VerifiableConsumer() + { + KafkaBridgeCore.GlobalHeapSize = "512M"; + } + + /// + /// Initialize a new + /// + public VerifiableConsumer() + : base("org.apache.kafka.tools.VerifiableConsumer") + { + } + } +} diff --git a/src/net/KafkaBridge/ClientSide/Tools/VerifiableProducer.cs b/src/net/KafkaBridge/ClientSide/Tools/VerifiableProducer.cs new file mode 100644 index 0000000000..1e3679ee3a --- /dev/null +++ b/src/net/KafkaBridge/ClientSide/Tools/VerifiableProducer.cs @@ -0,0 +1,39 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Tools +{ + /// + /// Class managing VerifiableProducer + /// + public class VerifiableProducer : JCOBridge.C2JBridge.JVMBridgeMain + { + static VerifiableProducer() + { + KafkaBridgeCore.GlobalHeapSize = "512M"; + } + + /// + /// Initialize a new + /// + public VerifiableProducer() + : base("org.apache.kafka.tools.VerifiableProducer") + { + } + } +} diff --git a/src/KafkaBridge/InternalConst.cs b/src/net/KafkaBridge/InternalConst.cs similarity index 81% rename from src/KafkaBridge/InternalConst.cs rename to src/net/KafkaBridge/InternalConst.cs index 1f1d9d3c4e..3ad32e0910 100644 --- a/src/KafkaBridge/InternalConst.cs +++ b/src/net/KafkaBridge/InternalConst.cs @@ -26,6 +26,7 @@ class CLIParam public const string ClassToRun = "ClassToRun"; public const string ScalaVersion = "ScalaVersion"; public const string KafkaLocation = "KafkaLocation"; + public const string Log4JConfiguration = "Log4JConfiguration"; } /// @@ -40,7 +41,11 @@ public class Const /// /// Default root path, i.e. consider installation within bin folder /// - public const string DefaultRootPath = ".."; + public const string DefaultRootPath = "./jars"; + /// + /// Default log4j path, i.e. consider installation within bin folder + /// + public const string DefaultLog4JPath = "./config/log4j.properties"; } class InternalConst diff --git a/src/KafkaBridge/KafkaBridge.csproj b/src/net/KafkaBridge/KafkaBridge.csproj similarity index 76% rename from src/KafkaBridge/KafkaBridge.csproj rename to src/net/KafkaBridge/KafkaBridge.csproj index c8cd1ba50e..98c0b84e62 100644 --- a/src/KafkaBridge/KafkaBridge.csproj +++ b/src/net/KafkaBridge/KafkaBridge.csproj @@ -8,11 +8,11 @@ MASES s.r.l. MASES s.r.l. MASES s.r.l. 
- 1.1.1.0 + 1.1.2.0 KafkaBridge true net461;netcoreapp3.1;net5.0;net5.0-windows;net6.0;net6.0-windows - ..\..\bin\ + ..\..\..\bin\ true false https://github.com/masesgroup/KafkaBridge/ @@ -20,13 +20,14 @@ https://github.com/masesgroup/KafkaBridge/releases LICENSE JCOB128x128.png - kafka apache-kafka dotnet clr netcore net5 + kafka apache-kafka dotnet clr netcore net5 net6 MASES.KafkaBridge true snupkg true true ..\Common\KafkaBridge.snk + README.md @@ -39,14 +40,21 @@ true + + + - + + + + + - + diff --git a/src/KafkaBridge/KafkaBridgeCore.cs b/src/net/KafkaBridge/KafkaBridgeCore.cs similarity index 92% rename from src/KafkaBridge/KafkaBridgeCore.cs rename to src/net/KafkaBridge/KafkaBridgeCore.cs index 34fca01df3..65238ecf84 100644 --- a/src/KafkaBridge/KafkaBridgeCore.cs +++ b/src/net/KafkaBridge/KafkaBridgeCore.cs @@ -54,7 +54,13 @@ static IArgumentMetadata[] prepareArguments() { Name = CLIParam.KafkaLocation, Default = Const.DefaultRootPath, - Help = "The folder where Kafka package is available. Default consider this application running in bin folder.", + Help = "The folder where the Kafka package is available. By default the application uses the JARs embedded in the package.", + }, + new ArgumentMetadata() + { + Name = CLIParam.Log4JConfiguration, + Default = Const.DefaultLog4JPath, + Help = "The file containing the log4j configuration.", + }, }; } @@ -76,9 +82,10 @@ static KafkaBridgeCore() ApplicationArgs = parser.UnparsedArgs.FilterJCOBridgeArguments(); GlobalRootPath = _parsedArgs.Get(CLIParam.KafkaLocation); + GlobalLog4JPath = _parsedArgs.Get(CLIParam.Log4JConfiguration); GlobalScalaVersion = _parsedArgs.Get(CLIParam.ScalaVersion); - new KafkaBridgeCore().Globalize(); + new KafkaBridgeCore(); } KafkaBridgeCore() @@ -88,7 +95,7 @@ static KafkaBridgeCore() /// <summary> /// Sets the main class to run /// </summary> - public static string MainClassToRun { get; set; } + public static string MainClassToRun { get; protected set; } /// <summary> /// The filtered application arguments /// </summary> @@ -100,6 +107,11 @@ static KafkaBridgeCore() /// </summary> public static string GlobalRootPath { get; set; } + /// <summary> + /// Sets the global value of log4j path + /// </summary> + public static string GlobalLog4JPath { get; set; } + /// <summary> /// Sets the global value of root path /// </summary> @@ -110,6 +122,11 @@ static KafkaBridgeCore() /// </summary> public static string GlobalHeapSize { get; set; } + /// <summary> + /// Sets the initial heap size + /// </summary> + public static string InitialHeapSize { get; set; } + /// <summary> /// The Scala version to be used /// </summary> @@ -300,9 +317,14 @@ protected virtual IDictionary Options { "com.sun.management.jmxremote.ssl", "false" }, - { "log4j.configuration", Log4JOpts}, { "kafka.logs.dir", LogDir}, - { "-Xmx" + GlobalHeapSize, null} + { "-Xmx" + GlobalHeapSize, null}, + { "log4j.configuration", $"file:{GlobalLog4JPath}"}, }; + if (!string.IsNullOrEmpty(InitialHeapSize)) + { + options.Add("-Xms" + InitialHeapSize, null); + } + if (JmxPort.HasValue) { options.Add("com.sun.management.jmxremote.port", JmxPort.Value.ToString()); @@ -327,6 +350,7 @@ protected virtual IDictionary Options string buildClassPath() { classPath = string.Empty; + buildClassPath(GlobalRootPath); buildClassPath(CoreDependenciesPath); buildClassPath(ExamplesPath); buildClassPath(ClientsPath); diff --git a/src/net/KafkaBridge/ServerSide/KafkaStart.cs b/src/net/KafkaBridge/ServerSide/KafkaStart.cs new file mode 100644 index 0000000000..2dc659ace7 --- /dev/null +++ b/src/net/KafkaBridge/ServerSide/KafkaStart.cs @@ -0,0 +1,42 @@ +/* +* Copyright 2021 MASES s.r.l.
+* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +using System; + +namespace MASES.KafkaBridge.Tools +{ + /// + /// Class managing Kafka start + /// + public class KafkaStart : JCOBridge.C2JBridge.JVMBridgeMain + { + static KafkaStart() + { + KafkaBridgeCore.GlobalHeapSize = Environment.Is64BitOperatingSystem ? "1G": "512M"; + KafkaBridgeCore.InitialHeapSize = Environment.Is64BitOperatingSystem ? "1G" : "512M"; + } + + /// + /// Initialize a new + /// + public KafkaStart() + : base("kafka.Kafka") + { + } + } +} diff --git a/src/net/KafkaBridge/ServerSide/ZooKeeperShell.cs b/src/net/KafkaBridge/ServerSide/ZooKeeperShell.cs new file mode 100644 index 0000000000..ed1b5829f6 --- /dev/null +++ b/src/net/KafkaBridge/ServerSide/ZooKeeperShell.cs @@ -0,0 +1,40 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. +*/ + +namespace MASES.KafkaBridge.Tools +{ + /// + /// Class managing ZooKeeper shell + /// + public class ZooKeeperShell : JCOBridge.C2JBridge.JVMBridgeMain + { + static ZooKeeperShell() + { + KafkaBridgeCore.GlobalHeapSize = "512M"; + KafkaBridgeCore.InitialHeapSize = "512M"; + } + + /// + /// Initialize a new + /// + public ZooKeeperShell() + : base("org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka") + { + } + } +} diff --git a/src/net/KafkaBridge/ServerSide/ZooKeeperStart.cs b/src/net/KafkaBridge/ServerSide/ZooKeeperStart.cs new file mode 100644 index 0000000000..3885dd4323 --- /dev/null +++ b/src/net/KafkaBridge/ServerSide/ZooKeeperStart.cs @@ -0,0 +1,40 @@ +/* +* Copyright 2021 MASES s.r.l. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* +* Refer to LICENSE for more information. 
+*/ + +namespace MASES.KafkaBridge.Tools +{ + /// + /// Class managing ZooKeeper start + /// + public class ZooKeeperStart : JCOBridge.C2JBridge.JVMBridgeMain + { + static ZooKeeperStart() + { + KafkaBridgeCore.GlobalHeapSize = "512M"; + KafkaBridgeCore.InitialHeapSize = "512M"; + } + + /// + /// Initialize a new + /// + public ZooKeeperStart() + : base("org.apache.zookeeper.server.quorum.QuorumPeerMain") + { + } + } +} diff --git a/src/net/KafkaBridge/mases.kafkabridge.targets b/src/net/KafkaBridge/mases.kafkabridge.targets new file mode 100644 index 0000000000..cd50189d66 --- /dev/null +++ b/src/net/KafkaBridge/mases.kafkabridge.targets @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/KafkaBridgeCLI/InternalConst.cs b/src/net/KafkaBridgeCLI/InternalConst.cs similarity index 100% rename from src/KafkaBridgeCLI/InternalConst.cs rename to src/net/KafkaBridgeCLI/InternalConst.cs diff --git a/src/KafkaBridgeCLI/KafkaBridgeCLI.csproj b/src/net/KafkaBridgeCLI/KafkaBridgeCLI.csproj similarity index 88% rename from src/KafkaBridgeCLI/KafkaBridgeCLI.csproj rename to src/net/KafkaBridgeCLI/KafkaBridgeCLI.csproj index 579afd8b7d..3f0e2dd190 100644 --- a/src/KafkaBridgeCLI/KafkaBridgeCLI.csproj +++ b/src/net/KafkaBridgeCLI/KafkaBridgeCLI.csproj @@ -9,11 +9,11 @@ MASES s.r.l. MASES s.r.l. MASES s.r.l. - 1.1.1.0 + 1.1.2.0 KafkaBridgeCLI true net461;netcoreapp3.1;net5.0;net5.0-windows;net6.0;net6.0-windows - ..\..\bin\ + ..\..\..\bin\ true false https://github.com/masesgroup/KafkaBridge/ @@ -21,13 +21,14 @@ https://github.com/masesgroup/KafkaBridge/releases LICENSE JCOB128x128.png - kafka apache-kafka dotnet clr netcore net5 + kafka apache-kafka dotnet clr netcore net5 net6 MASES.KafkaBridgeCLI true snupkg true true ..\Common\KafkaBridge.snk + README.md @@ -41,9 +42,10 @@ true - + + diff --git a/src/KafkaBridgeCLI/Program.cs b/src/net/KafkaBridgeCLI/Program.cs similarity index 100% rename from src/KafkaBridgeCLI/Program.cs rename to src/net/KafkaBridgeCLI/Program.cs diff --git a/src/KafkaBridgeTemplates.sln b/src/net/KafkaBridgeTemplates.sln similarity index 80% rename from src/KafkaBridgeTemplates.sln rename to src/net/KafkaBridgeTemplates.sln index ca356f7270..258710b4d6 100644 --- a/src/KafkaBridgeTemplates.sln +++ b/src/net/KafkaBridgeTemplates.sln @@ -9,6 +9,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "kafkabridgeProducerApp", "t EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "kafkabridgeConsumerApp", "templates\templates\kafkabridgeConsumerApp\kafkabridgeConsumerApp.csproj", "{A1CF5D0C-C76C-4E44-A113-8F73DC7F3732}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "kafkabridgePipeStreamApp", "templates\templates\kafkabridgePipeStreamApp\kafkabridgePipeStreamApp.csproj", "{D022E0DE-0B84-4C83-9530-C3B9D797D6C3}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -27,6 +29,10 @@ Global {A1CF5D0C-C76C-4E44-A113-8F73DC7F3732}.Debug|Any CPU.Build.0 = Debug|Any CPU {A1CF5D0C-C76C-4E44-A113-8F73DC7F3732}.Release|Any CPU.ActiveCfg = Release|Any CPU {A1CF5D0C-C76C-4E44-A113-8F73DC7F3732}.Release|Any CPU.Build.0 = Release|Any CPU + {D022E0DE-0B84-4C83-9530-C3B9D797D6C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D022E0DE-0B84-4C83-9530-C3B9D797D6C3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D022E0DE-0B84-4C83-9530-C3B9D797D6C3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D022E0DE-0B84-4C83-9530-C3B9D797D6C3}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection 
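The server-side classes introduced above (KafkaStart, ZooKeeperShell, ZooKeeperStart) all follow one pattern: a static constructor pins the JVM heap through KafkaBridgeCore before the bridge starts, and the instance constructor hands the fully qualified JVM main class to JVMBridgeMain. Below is a minimal sketch of that pattern, not part of the patch: MyToolStart and com.example.MyTool are hypothetical names, while InitialHeapSize and GlobalHeapSize are the properties this patch adds, mapped to -Xms and -Xmx in KafkaBridgeCore.Options.

    // Minimal sketch (not part of the patch) of the entry-point pattern used by
    // KafkaStart, ZooKeeperShell and ZooKeeperStart above. MyToolStart and the
    // JVM class com.example.MyTool are hypothetical names.
    using MASES.KafkaBridge;

    namespace MyCompany.Bridge
    {
        public class MyToolStart : MASES.JCOBridge.C2JBridge.JVMBridgeMain
        {
            static MyToolStart()
            {
                // The static constructor runs before the JVM options are assembled,
                // so these values end up in KafkaBridgeCore.Options as -Xms/-Xmx.
                KafkaBridgeCore.InitialHeapSize = "256M"; // becomes -Xms256M
                KafkaBridgeCore.GlobalHeapSize = "512M";  // becomes -Xmx512M
            }

            /// <summary>
            /// Initialize a new MyToolStart
            /// </summary>
            public MyToolStart()
                : base("com.example.MyTool") // fully qualified JVM main class
            {
            }
        }
    }

Setting the heap sizes in the static constructor matters because they are read once, when the JVM hosting the bridged classes is created.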
GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/src/net/README.md b/src/net/README.md new file mode 100644 index 0000000000..23a0b0e49e --- /dev/null +++ b/src/net/README.md @@ -0,0 +1,3 @@ +# KafkaBridge .NET implementation + +This folder contains the project and classes ported from the official Apache Kafka delivery. \ No newline at end of file diff --git a/src/templates/templatepack.csproj b/src/net/templates/templatepack.csproj similarity index 88% rename from src/templates/templatepack.csproj rename to src/net/templates/templatepack.csproj index 80fd1fd528..f097de1c85 100644 --- a/src/templates/templatepack.csproj +++ b/src/net/templates/templatepack.csproj @@ -1,7 +1,7 @@ Template - 1.1.1.0 + 1.1.2.0 MASES.KafkaBridge.Templates KafkaBridge Templates - Templates to use the KafkaBridge MASES s.r.l. @@ -11,7 +11,7 @@ Ready made templates to create applications based on KafkaBridge Ready made templates to create applications based on KafkaBridge. The templates are ready made starting points, all information and APIs are available in the official website https://masesgroup.github.io/KafkaBridge net6.0;net5.0;netcoreapp3.1;net461 - ..\..\bin\ + ..\..\..\bin\ true false https://github.com/masesgroup/KafkaBridge/ @@ -19,7 +19,7 @@ https://github.com/masesgroup/KafkaBridge/releases LICENSE JCOB128x128.png - kafka apache-kafka dotnet clr netcore net5 template + kafka apache-kafka dotnet clr netcore net5 net6 template 8.0 KafkaBridge Templates true @@ -28,6 +28,7 @@ true false content + README.md @@ -41,9 +42,10 @@ true - + + diff --git a/src/templates/templates/kafkabridgeConsumerApp/.template.config/template.json b/src/net/templates/templates/kafkabridgeConsumerApp/.template.config/template.json similarity index 100% rename from src/templates/templates/kafkabridgeConsumerApp/.template.config/template.json rename to src/net/templates/templates/kafkabridgeConsumerApp/.template.config/template.json diff --git a/src/net/templates/templates/kafkabridgeConsumerApp/Program.cs b/src/net/templates/templates/kafkabridgeConsumerApp/Program.cs new file mode 100644 index 0000000000..ec1827dc34 --- /dev/null +++ b/src/net/templates/templates/kafkabridgeConsumerApp/Program.cs @@ -0,0 +1,47 @@ +using MASES.KafkaBridge; +using MASES.KafkaBridge.Clients.Consumer; +using MASES.KafkaBridge.Java.Util; +using System; + +namespace MASES.KafkaBridgeTemplate.KafkaBridgeConsumer +{ + class Program + { + const string theServer = "localhost:9092"; + const string theTopic = "myTopic"; + + static string serverToUse = theServer; + static string topicToUse = theTopic; + + static void Main(string[] args) + { + var appArgs = KafkaBridgeCore.ApplicationArgs; + + if (appArgs.Length != 0) + { + serverToUse = appArgs[0]; // use the filtered application arguments rather than the raw command line + } + + Properties props = new Properties(); + props.Put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(ConsumerConfig.GROUP_ID_CONFIG, "test"); + props.Put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + props.Put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + props.Put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + props.Put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + + using (var consumer = new KafkaConsumer(props)) + { + consumer.Subscribe(Collections.singleton(topicToUse)); + while (true) + { + var records = consumer.Poll((long)TimeSpan.FromMilliseconds(200).TotalMilliseconds); + foreach (var item in records) +
{ + Console.WriteLine($"Offset = {item.Offset}, Key = {item.Key}, Value = {item.Value}"); + } + } + } + } + } +} diff --git a/src/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj b/src/net/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj similarity index 83% rename from src/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj rename to src/net/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj index 1cef255d0a..ea9c8e188c 100644 --- a/src/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj +++ b/src/net/templates/templates/kafkabridgeConsumerApp/kafkabridgeConsumerApp.csproj @@ -8,11 +8,11 @@ - + - + diff --git a/src/net/templates/templates/kafkabridgePipeStreamApp/.template.config/template.json b/src/net/templates/templates/kafkabridgePipeStreamApp/.template.config/template.json new file mode 100644 index 0000000000..879f0a061a --- /dev/null +++ b/src/net/templates/templates/kafkabridgePipeStreamApp/.template.config/template.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json.schemastore.org/template", + "author": "MASES s.r.l.", + "classifications": [ "Common", "Console", "C#8" ], + "identity": "MASES.KafkaBridgeTemplate.KafkaBridgePipeStream", + "name": "Console templates: Kafka Bridge Pipe Stream project", + "shortName": "kafkabridgePipeStreamApp", + "tags": { + "language": "C#", + "type": "project" + } +} \ No newline at end of file diff --git a/src/net/templates/templates/kafkabridgePipeStreamApp/Program.cs b/src/net/templates/templates/kafkabridgePipeStreamApp/Program.cs new file mode 100644 index 0000000000..c85bef13ee --- /dev/null +++ b/src/net/templates/templates/kafkabridgePipeStreamApp/Program.cs @@ -0,0 +1,53 @@ +using MASES.KafkaBridge; +using MASES.KafkaBridge.Clients.Consumer; +using MASES.KafkaBridge.Common.Serialization; +using MASES.KafkaBridge.Java.Util; +using MASES.KafkaBridge.Streams; +using System; + +namespace MASES.KafkaBridgeTemplate.KafkaBridgeStreamPipe +{ + class Program + { + const string theServer = "localhost:9092"; + const string theTopic = "myTopic"; + + static string serverToUse = theServer; + static string topicToUse = theTopic; + + static void Main(string[] args) + { + var appArgs = KafkaBridgeCore.ApplicationArgs; + + if (appArgs.Length != 0) + { + serverToUse = appArgs[0]; // use the filtered application arguments rather than the raw command line + } + + var props = new Properties(); + + props.Put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe"); + props.Put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String.getClass()); + props.Put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String.getClass()); + + // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data + props.Put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + var builder = new StreamsBuilder(); + + builder.Stream(topicToUse).To("streams-pipe-output"); + + using (var streams = new KafkaStreams(builder.Build(), props)) + { + streams.Start(); + while (true) + { + var state = streams.State; + Console.WriteLine($"KafkaStreams state: {state}"); + System.Threading.Thread.Sleep(1000); // poll the state once per second instead of spinning + } + } + } + } +} diff --git a/src/net/templates/templates/kafkabridgePipeStreamApp/kafkabridgePipeStreamApp.csproj b/src/net/templates/templates/kafkabridgePipeStreamApp/kafkabridgePipeStreamApp.csproj new file mode 100644 index 0000000000..ea9c8e188c --- /dev/null +++ b/src/net/templates/templates/kafkabridgePipeStreamApp/kafkabridgePipeStreamApp.csproj @@ -0,0 +1,18 @@ + + + Exe + 
net6.0;net5.0;netcoreapp3.1;net461 + + + + + + + + + + + + + + diff --git a/src/templates/templates/kafkabridgeProducerApp/.template.config/template.json b/src/net/templates/templates/kafkabridgeProducerApp/.template.config/template.json similarity index 100% rename from src/templates/templates/kafkabridgeProducerApp/.template.config/template.json rename to src/net/templates/templates/kafkabridgeProducerApp/.template.config/template.json diff --git a/src/templates/templates/kafkabridgeProducerApp/Program.cs b/src/net/templates/templates/kafkabridgeProducerApp/Program.cs similarity index 66% rename from src/templates/templates/kafkabridgeProducerApp/Program.cs rename to src/net/templates/templates/kafkabridgeProducerApp/Program.cs index 5fb7fb7f1b..a41e495335 100644 --- a/src/templates/templates/kafkabridgeProducerApp/Program.cs +++ b/src/net/templates/templates/kafkabridgeProducerApp/Program.cs @@ -1,10 +1,9 @@ using MASES.KafkaBridge; -using MASES.KafkaBridge.Clients.Consumer; using MASES.KafkaBridge.Clients.Producer; using MASES.KafkaBridge.Java.Util; using System; -namespace MASES.KafkaBridgeTemplate.KafkaBridgeConsumer +namespace MASES.KafkaBridgeTemplate.KafkaBridgeProducer { class Program { @@ -24,12 +23,12 @@ static void Main(string[] args) } Properties props = new Properties(); - props.Put("bootstrap.servers", serverToUse); - props.Put("acks", "all"); - props.Put("retries", 0); - props.Put("linger.ms", 1); - props.Put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); - props.Put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + props.Put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(ProducerConfig.ACKS_CONFIG, "all"); + props.Put(ProducerConfig.RETRIES_CONFIG, 0); + props.Put(ProducerConfig.LINGER_MS_CONFIG, 1); + props.Put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.Put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); using (KafkaProducer producer = new KafkaProducer(props)) { diff --git a/src/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj b/src/net/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj similarity index 83% rename from src/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj rename to src/net/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj index 1cef255d0a..ea9c8e188c 100644 --- a/src/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj +++ b/src/net/templates/templates/kafkabridgeProducerApp/kafkabridgeProducerApp.csproj @@ -8,11 +8,11 @@ - + - + diff --git a/src/templates/templates/kafkabridgeConsumerApp/Program.cs b/src/templates/templates/kafkabridgeConsumerApp/Program.cs deleted file mode 100644 index ab124ab466..0000000000 --- a/src/templates/templates/kafkabridgeConsumerApp/Program.cs +++ /dev/null @@ -1,44 +0,0 @@ -using MASES.KafkaBridge; -using MASES.KafkaBridge.Clients.Consumer; -using MASES.KafkaBridge.Java.Util; -using System; - -namespace MASES.KafkaBridgeTemplate.KafkaBridgeConsumer -{ - class Program - { - const string theServer = "localhost:9092"; - const string theTopic = "myTopic"; - - static string serverToUse = theServer; - static string topicToUse = theTopic; - - static void Main(string[] args) - { - var appArgs = KafkaBridgeCore.ApplicationArgs; - - if (appArgs.Length != 0) - { - serverToUse = args[0]; - } - - Properties props = new 
Properties(); - props.Put("bootstrap.servers", serverToUse); - props.Put("group.id", "test"); - props.Put("enable.auto.commit", "true"); - props.Put("auto.commit.interval.ms", "1000"); - props.Put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - props.Put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - var consumer = new KafkaConsumer(props); - consumer.Subscribe(Collections.singleton(topicToUse)); - while (true) - { - var records = consumer.Poll((long)TimeSpan.FromMilliseconds(200).TotalMilliseconds); - foreach (var item in records) - { - Console.WriteLine($"Offset = {item.Offset}, Key = {item.Key}, Value = {item.Value}"); - } - } - } - } -} diff --git a/tests/KafkaBridgeTest.sln b/tests/KafkaBridgeTest.sln index 9ab9d39d9f..eb91ef5a1d 100644 --- a/tests/KafkaBridgeTest.sln +++ b/tests/KafkaBridgeTest.sln @@ -3,24 +3,24 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.31624.102 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "KafkaBridge", "..\src\KafkaBridge\KafkaBridge.csproj", "{BAB438E7-DFF7-46AF-8097-F686400BDB8A}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "KafkaBridgeTest", "KafkaBridgeTest\KafkaBridgeTest.csproj", "{C556E8A0-8B06-4D5B-AEAD-B318B48DF150}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "KafkaBridge", "..\src\net\KafkaBridge\KafkaBridge.csproj", "{14871D50-7E4E-4BAE-8005-CA86E891F602}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {BAB438E7-DFF7-46AF-8097-F686400BDB8A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {BAB438E7-DFF7-46AF-8097-F686400BDB8A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {BAB438E7-DFF7-46AF-8097-F686400BDB8A}.Release|Any CPU.ActiveCfg = Release|Any CPU - {BAB438E7-DFF7-46AF-8097-F686400BDB8A}.Release|Any CPU.Build.0 = Release|Any CPU {C556E8A0-8B06-4D5B-AEAD-B318B48DF150}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C556E8A0-8B06-4D5B-AEAD-B318B48DF150}.Debug|Any CPU.Build.0 = Debug|Any CPU {C556E8A0-8B06-4D5B-AEAD-B318B48DF150}.Release|Any CPU.ActiveCfg = Release|Any CPU {C556E8A0-8B06-4D5B-AEAD-B318B48DF150}.Release|Any CPU.Build.0 = Release|Any CPU + {14871D50-7E4E-4BAE-8005-CA86E891F602}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {14871D50-7E4E-4BAE-8005-CA86E891F602}.Debug|Any CPU.Build.0 = Debug|Any CPU + {14871D50-7E4E-4BAE-8005-CA86E891F602}.Release|Any CPU.ActiveCfg = Release|Any CPU + {14871D50-7E4E-4BAE-8005-CA86E891F602}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/tests/KafkaBridgeTest/KafkaBridgeTest.csproj b/tests/KafkaBridgeTest/KafkaBridgeTest.csproj index f632bb7ba1..d9736d09c6 100644 --- a/tests/KafkaBridgeTest/KafkaBridgeTest.csproj +++ b/tests/KafkaBridgeTest/KafkaBridgeTest.csproj @@ -1,5 +1,4 @@ - KafkaBridgeTest Exe @@ -13,10 +12,10 @@ net461;netcoreapp3.1;net5.0;net5.0-windows;net6.0;net6.0-windows ..\..\bin\ - + - + diff --git a/tests/KafkaBridgeTest/Program.cs b/tests/KafkaBridgeTest/Program.cs index 5c8798bd64..463c6d8ac6 100644 --- a/tests/KafkaBridgeTest/Program.cs +++ b/tests/KafkaBridgeTest/Program.cs @@ -22,7 +22,6 @@ * SOFTWARE. 
*/ -using MASES.JCOBridge.C2JBridge; using MASES.KafkaBridge; using MASES.KafkaBridge.Clients.Admin; using MASES.KafkaBridge.Clients.Consumer; @@ -93,9 +92,8 @@ static void createTopic() topic.Configs(map); var coll = Collections.singleton(topic); - var adminClientConfig = AdminClientConfig.DynClazz; Properties props = new Properties(); - props.Put(adminClientConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); using (var admin = KafkaAdminClient.Create(props)) { @@ -103,11 +101,11 @@ static void createTopic() CreateTopicsResult result = admin.CreateTopics(coll); // Call values() to get the result for a specific topic - var future = result.Dyn().values().get(topicName); + var future = result.Values.Get(topicName); // Call get() to block until the topic creation is complete or has failed // if creation failed the ExecutionException wraps the underlying cause. - future.get(); + future.Get(); } } catch (Exception e) @@ -119,12 +117,12 @@ static void createTopic() static void produceSomething() { Properties props = new Properties(); - props.Put("bootstrap.servers", serverToUse); - props.Put("acks", "all"); - props.Put("retries", 0); - props.Put("linger.ms", 1); - props.Put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); - props.Put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + props.Put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(ProducerConfig.ACKS_CONFIG, "all"); + props.Put(ProducerConfig.RETRIES_CONFIG, 0); + props.Put(ProducerConfig.LINGER_MS_CONFIG, 1); + props.Put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.Put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); using (KafkaProducer producer = new KafkaProducer(props)) { @@ -143,51 +141,49 @@ static void produceSomething() static void consumeSomething() { Properties props = new Properties(); - props.Put("bootstrap.servers", serverToUse); - props.Put("group.id", "test"); - props.Put("enable.auto.commit", "true"); - props.Put("auto.commit.interval.ms", "1000"); - props.Put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - props.Put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - var consumer = new KafkaConsumer(props); - consumer.Subscribe(Collections.singleton(topicToUse)); - while (!resetEvent.WaitOne(0)) + props.Put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(ConsumerConfig.GROUP_ID_CONFIG, "test"); + props.Put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + props.Put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + props.Put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + props.Put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); + + using (var consumer = new KafkaConsumer(props)) { - var records = consumer.Poll((long)TimeSpan.FromMilliseconds(200).TotalMilliseconds); - foreach (var item in records) + consumer.Subscribe(Collections.singleton(topicToUse)); + while (!resetEvent.WaitOne(0)) { - Console.WriteLine($"Offset = {item.Offset}, Key = {item.Key}, Value = {item.Value}"); + var records = consumer.Poll((long)TimeSpan.FromMilliseconds(200).TotalMilliseconds); + foreach (var item in records) + { + Console.WriteLine($"Offset = {item.Offset}, Key 
= {item.Key}, Value = {item.Value}"); + } } } } static void streamSomething() { - var streamConfig = StreamsConfig.DynClazz; - var serdes = Serdes.DynClazz; - - var propObj = Properties.New(); + var props = new Properties(); - var props = propObj.Dyn(); - propObj.Put(streamConfig.APPLICATION_ID_CONFIG, "streams-pipe"); - propObj.Put(streamConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); - propObj.Put(streamConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, serdes.String().getClass()); - propObj.Put(streamConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, serdes.String().getClass()); + props.Put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe"); + props.Put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, serverToUse); + props.Put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String.getClass()); + props.Put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String.getClass()); // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data - props.put(ConsumerConfig.DynClazz.AUTO_OFFSET_RESET_CONFIG, "earliest"); + props.Put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - var builder = StreamsBuilder.New(); - var dynBuilder = builder.Dyn(); + var builder = new StreamsBuilder(); - dynBuilder.stream(topicToUse).to("streams-pipe-output"); + builder.Stream(topicToUse).To("streams-pipe-output"); - using (var streams = KafkaStreams.New(dynBuilder.build(), props)) + using (var streams = new KafkaStreams(builder.Build(), props)) { - streams.start(); + streams.Start(); while (!resetEvent.WaitOne(1000)) { - var state = streams.state(); + var state = streams.State; Console.WriteLine($"KafkaStreams state: {state}"); } }
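Taken together with the Topology class added earlier in this patch, the typed API also supports building a topology without a StreamsBuilder. The following is a minimal sketch, not part of the patch: it assumes the bridged KafkaStreams constructor accepts a Topology just as it accepts the result of StreamsBuilder.Build() (true of the underlying Java API, but the overload is not shown here), and the topic names and application id are placeholders.

    // Minimal sketch (not part of the patch): the Topology class added earlier
    // composed with the typed KafkaStreams API used in streamSomething() above.
    // Assumes new KafkaStreams(Topology, Properties) exists, mirroring the Java
    // API; topic names and the application id are placeholders.
    using System;
    using MASES.KafkaBridge.Java.Util;
    using MASES.KafkaBridge.Streams;

    class TopologyPipe
    {
        static void Main()
        {
            var props = new Properties();
            props.Put(StreamsConfig.APPLICATION_ID_CONFIG, "topology-pipe");
            props.Put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

            // Read "myTopic" from the earliest offset and pipe it to the sink topic;
            // AddSource/AddSink return the Topology itself, so calls can be chained.
            var topology = new Topology()
                .AddSource(Topology.AutoOffsetReset.EARLIEST, "source", "myTopic")
                .AddSink("sink", "streams-pipe-output", "source");

            using (var streams = new KafkaStreams(topology, props))
            {
                streams.Start();
                while (true)
                {
                    Console.WriteLine($"KafkaStreams state: {streams.State}");
                    System.Threading.Thread.Sleep(1000); // poll the state once per second
                }
            }
        }
    }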