diff --git a/src/container/DockerfileKNet.linux b/src/container/DockerfileKNet.linux index 92e17f2eb1..f035bd91a8 100644 --- a/src/container/DockerfileKNet.linux +++ b/src/container/DockerfileKNet.linux @@ -31,6 +31,7 @@ ADD ./jars /app/jars ADD ./src/config /app/config ADD ./src/config/kraft /app/config/kraft ADD ./src/container/config_container /app/config_container +ADD ./src/container/config_container/kraft /app/config_container/kraft ADD ./src/container/KNetRun.sh /app ENV JCOBRIDGE_JVMPath=/usr/lib/jvm/java-17-openjdk-amd64/lib/server/libjvm.so
diff --git a/src/container/KNetRun.sh b/src/container/KNetRun.sh index 18626434c4..a99bdbe16a 100644 --- a/src/container/KNetRun.sh +++ b/src/container/KNetRun.sh @@ -89,6 +89,15 @@ else #Issue newline to config file in case there is not one already echo "" >> /app/config_container/connect-knet-specific.properties + + #Issue newline to config file in case there is not one already + echo "" >> /app/config_container/kraft/broker.properties + + #Issue newline to config file in case there is not one already + echo "" >> /app/config_container/kraft/controller.properties + + #Issue newline to config file in case there is not one already + echo "" >> /app/config_container/kraft/server.properties ( function updateConfig() { @@ -139,17 +148,20 @@ else if [[ $env_var =~ ^CONNECT_ ]]; then connect_standalone_name=$(echo "$env_var" | tr '[:upper:]' '[:lower:]' | tr _ .) updateConfig "$connect_standalone_name" "${!env_var}" "/app/config_container/connect-standalone.properties" - fi - - if [[ $env_var =~ ^CONNECT_ ]]; then - connect_distributed_name=$(echo "$env_var" | tr '[:upper:]' '[:lower:]' | tr _ .) - updateConfig "$connect_distributed_name" "${!env_var}" "/app/config_container/connect-distributed.properties" + updateConfig "$connect_standalone_name" "${!env_var}" "/app/config_container/connect-distributed.properties" fi if [[ $env_var =~ ^KNETCONNECT_ ]]; then knetconnect_specific_name=$(echo "$env_var" | tr '[:upper:]' '[:lower:]' | tr _ .) updateConfig "$knetconnect_specific_name" "${!env_var}" "/app/config_container/connect-knet-specific.properties" fi + + if [[ $env_var =~ ^KRAFT_ ]]; then + kraft_name=$(echo "$env_var" | tr '[:upper:]' '[:lower:]' | tr _ .) + updateConfig "$kraft_name" "${!env_var}" "/app/config_container/kraft/broker.properties" + updateConfig "$kraft_name" "${!env_var}" "/app/config_container/kraft/controller.properties" + updateConfig "$kraft_name" "${!env_var}" "/app/config_container/kraft/server.properties" + fi done ) @@ -253,6 +270,18 @@ else # Exit with status of process that exited first exit $? + elif [ ${KNET_DOCKER_RUNNING_MODE} = "kraft-broker" ]; then + echo "Starting KRaft broker" + # Start KRaft broker node + dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/broker.properties + elif [ ${KNET_DOCKER_RUNNING_MODE} = "kraft-controller" ]; then + echo "Starting KRaft controller" + # Start KRaft controller node + dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/controller.properties + elif [ ${KNET_DOCKER_RUNNING_MODE} = "kraft-server" ]; then + echo "Starting KRaft server" + # Start KRaft combined broker/controller node + dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/server.properties else - echo "KNET_DOCKER_RUNNING_MODE exist, but its value (${KNET_DOCKER_RUNNING_MODE}) is not zookeeper, broker, server, (knet)connect-standalone, (knet)connect-distributed or (knet)connect-standalone-server" + echo "KNET_DOCKER_RUNNING_MODE exists, but its value (${KNET_DOCKER_RUNNING_MODE}) is not zookeeper, broker, server, kraft-broker, kraft-controller, kraft-server, (knet)connect-standalone, (knet)connect-distributed or (knet)connect-standalone-server" fi
diff --git a/src/container/config_container/kraft/broker.properties b/src/container/config_container/kraft/broker.properties new file mode 100644 index
0000000000..2d15997f28 --- /dev/null +++ b/src/container/config_container/kraft/broker.properties @@ -0,0 +1,129 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker + +# The node id associated with this instance's roles +node.id=2 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. If not configured, the host name will be equal to the value of +# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://localhost:9092 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=PLAINTEXT + +# Listener name, hostname and port the broker will advertise to clients. 
+# If not set, it uses the value for "listeners". +advertised.listeners=PLAINTEXT://localhost:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# This is required if running in KRaft mode. On a node with `process.roles=broker`, only the first listed listener will be used by the broker. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-broker-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. 
+num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. 
+ +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/container/config_container/kraft/controller.properties b/src/container/config_container/kraft/controller.properties new file mode 100644 index 0000000000..9d152f7829 --- /dev/null +++ b/src/container/config_container/kraft/controller.properties @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. +# + +############################# Server Basics ############################# + +# The role of this server. 
Setting this puts us in KRaft mode +process.roles=controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Note that only the controller listeners are allowed here when `process.roles=controller`, and this listener should be consistent with `controller.quorum.voters` value. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=CONTROLLER://:9093 + +# A comma-separated list of the names of the listeners used by the controller. +# This is required if running in KRaft mode. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-controller-logs + +# The default number of log partitions per topic. 
More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
+ +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/container/config_container/kraft/server.properties b/src/container/config_container/kraft/server.properties new file mode 100644 index 0000000000..6461c988d3 --- /dev/null +++ b/src/container/config_container/kraft/server.properties @@ -0,0 +1,132 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. +# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://:9092,CONTROLLER://:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=PLAINTEXT + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=PLAINTEXT://localhost:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. 
+controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-combined-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/src/documentation/articles/docker.md b/src/documentation/articles/docker.md index 9c3e8155dd..4140944401 100644 --- a/src/documentation/articles/docker.md +++ b/src/documentation/articles/docker.md @@ -23,28 +23,59 @@ The container image can work in multiple modes based on the environment variable - the Docker image will issue the command: > dotnet /app/MASES.KNetCLI.dll $@ -- - **KNET_DOCKER_RUNNING_MODE**=**zookeeper**: starts a [ZooKeeper™](https://zookeeper.apache.org/) node, defaults to run a standalone [ZooKeeper™](https://zookeeper.apache.org/) exposing on port 2181 - - the Docker image will issue the command: - > dotnet /app/MASES.KNetCLI.dll zookeeperstart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/zookeeper.properties - - the image can add, or update, configuration variables of zookeeper.properties using the following pattern for environment variables: - - Shall start with **ZOOKEEPER_** - - Each property shall be capitalized and the '.' 
shall be replaced with '_' - - The value of the environment variable is the value will be used in the configuration file - - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/zookeeper.properties +- The image can run [ZooKeeper™](https://zookeeper.apache.org/) node and/or [Apache Kafka™](https://kafka.apache.org/) broker node using specific values for **KNET_DOCKER_RUNNING_MODE**: -- **KNET_DOCKER_RUNNING_MODE**=**broker**: starts an [Apache Kafka™](https://kafka.apache.org/) broker node, defaults to run a standalone broker exposing on port 9092 - - the Docker image will issue the command: - > dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/server.properties - - the image can add, or update, configuration variables of server.properties using the following pattern for environment variables: - - Shall start with **KAFKA_** - - Each property shall be capitalized and the '.' 
shall be replaced with '_' - - The value of the environment variable is the value will be used in the configuration file - - As an example: the environment variable **KAFKA_ADVERTISED_LISTENERS** represents **advertised.listeners** - - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/server.properties + - **KNET_DOCKER_RUNNING_MODE**=**zookeeper**: starts a [ZooKeeper™](https://zookeeper.apache.org/) node, defaults to run a standalone [ZooKeeper™](https://zookeeper.apache.org/) exposing on port 2181 + - the Docker image will issue the command: + > dotnet /app/MASES.KNetCLI.dll zookeeperstart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/zookeeper.properties + - the image can add, or update, configuration variables of zookeeper.properties using the following pattern for environment variables: + - Shall start with **ZOOKEEPER_** + - Each property shall be capitalized and the '.' shall be replaced with '_' + - The value of the environment variable is the value will be used in the configuration file + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/zookeeper.properties, all configuration information are available at https://zookeeper.apache.org/doc/r3.8.3/index.html + + - **KNET_DOCKER_RUNNING_MODE**=**broker**: starts an [Apache Kafka™](https://kafka.apache.org/) broker node, defaults to run a standalone broker exposing on port 9092 + - the Docker image will issue the command: + > dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/server.properties + - the image can add, or update, configuration variables of server.properties using the following pattern for environment variables: + - Shall start with **KAFKA_** + - Each property shall be capitalized and the '.' 
shall be replaced with '_' + - The value of the environment variable is the value will be used in the configuration file + - As an example: the environment variable **KAFKA_ADVERTISED_LISTENERS** represents **advertised.listeners** + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/server.properties, all configuration information are available at https://kafka.apache.org/documentation/ + + - **KNET_DOCKER_RUNNING_MODE**=**server**: starts, within the container, both a [ZooKeeper™](https://zookeeper.apache.org/) node and an [Apache Kafka™](https://kafka.apache.org/) broker node, it defaults to run exposing [ZooKeeper™](https://zookeeper.apache.org/) on port 2181 and [Apache Kafka™](https://kafka.apache.org/) broker on port 9092; the image can add, or update, configuration variables of [ZooKeeper™](https://zookeeper.apache.org/) and [Apache Kafka™](https://kafka.apache.org/) using the same pattern of previous points. -- **KNET_DOCKER_RUNNING_MODE**=**server**: starts, within the container, both a [ZooKeeper™](https://zookeeper.apache.org/) node and an [Apache Kafka™](https://kafka.apache.org/) broker node, it defaults to run exposing [ZooKeeper™](https://zookeeper.apache.org/) on port 2181 and [Apache Kafka™](https://kafka.apache.org/) broker on port 9092; the image can add, or update, configuration variables of [ZooKeeper™](https://zookeeper.apache.org/) and [Apache Kafka™](https://kafka.apache.org/) using the same pattern of previous points. 
+- The image can run [Apache Kafka™](https://kafka.apache.org/) in [KRaft](https://developer.confluent.io/learn/kraft/) mode using specific values for **KNET_DOCKER_RUNNING_MODE**: + + - **KNET_DOCKER_RUNNING_MODE**=**kraft-broker**: starts a [Apache Kafka™](https://kafka.apache.org/) as broker node + - the Docker image will issue the command: + > dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/broker.properties + - the image can add, or update, configuration variables of broker.properties using the following pattern for environment variables: + - Shall start with **KRAFT_** + - Each property shall be capitalized and the '.' shall be replaced with '_' + - The value of the environment variable is the value will be used in the configuration file + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/kraft/broker.properties, all configuration information are available at https://kafka.apache.org/documentation/ + + - **KNET_DOCKER_RUNNING_MODE**=**kraft-controller**: starts a [Apache Kafka™](https://kafka.apache.org/) as controller node + - the Docker image will issue the command: + > dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/controller.properties + - the image can add, or update, configuration variables of controller.properties using the following pattern for environment variables: + - Shall start with **KRAFT_** + - Each property shall be capitalized and the '.' 
shall be replaced with '_' + - The value of the environment variable is the value will be used in the configuration file + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/kraft/controller.properties, all configuration information are available at https://kafka.apache.org/documentation/ + + - **KNET_DOCKER_RUNNING_MODE**=**kraft-server**: starts a [Apache Kafka™](https://kafka.apache.org/) as server node + - the Docker image will issue the command: + > dotnet /app/MASES.KNetCLI.dll kafkastart -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/kraft/server.properties + - the image can add, or update, configuration variables of server.properties using the following pattern for environment variables: + - Shall start with **KRAFT_** + - Each property shall be capitalized and the '.' shall be replaced with '_' + - The value of the environment variable is the value will be used in the configuration file + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/kraft/server.properties, all configuration information are available at https://kafka.apache.org/documentation/ -- Then the image can run both Apache Connect and KNet Connect using specific values for **KNET_DOCKER_RUNNING_MODE**: +- The image can run both Apache Connect and KNet Connect using specific values for **KNET_DOCKER_RUNNING_MODE**: - **KNET_DOCKER_RUNNING_MODE**=**knet-connect-standalone**: starts, within the container, a standalone KNet Connect instance - the Docker image will issue the command: > dotnet /app/MASES.KNetConnect.dll -s -k -Log4JConfiguration /app/config_container/log4j.properties /app/config_container/connect-standalone.properties /app/config_container/connect-knet-specific.properties @@ -52,12 +83,12 @@ The container image can work in multiple modes based on the environment variable - Shall start with **CONNECT_** - Each property shall be 
capitalized and the '.' shall be replaced with '_' - The value of the environment variable is the value will be used in the configuration file - - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-standalone.properties + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-standalone.properties, all configuration information are available at https://kafka.apache.org/documentation/ - the image can add, or update, configuration variables of connect-knet-specific.properties using the following pattern for environment variables: - Shall start with **KNETCONNECT_** - Each property shall be capitalized and the '.' shall be replaced with '_' - The value of the environment variable is the value will be used in the configuration file - - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-knet-specific.properties + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-knet-specific.properties, all configuration information depends from the connector in use - **KNET_DOCKER_RUNNING_MODE**=**connect-standalone**: starts, within the container, a standalone Apache Connect instance - the Docker image will issue the command: @@ -77,7 +108,7 @@ The container image can work in multiple modes based on the environment variable - Shall start with **CONNECT_** - Each property shall be capitalized and the '.' 
shall be replaced with '_' - The value of the environment variable is the value will be used in the configuration file - - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-distributed.properties + - The default file is available at https://github.com/masesgroup/KNet/blob/master/src/container/config_container/connect-distributed.properties, all configuration information are available at https://kafka.apache.org/documentation/ - **KNET_DOCKER_RUNNING_MODE**=**connect-distributed**: starts, within the container, a distributed Apache Connect instance - the Docker image will issue the command: