Skip to content

Commit

Permalink
Merge pull request #738 from Altinity/testflows_new_kafka_tests
Browse files Browse the repository at this point in the history
Testflows new kafka tests
  • Loading branch information
Selfeer authored Sep 2, 2024
2 parents 3c98575 + bda3400 commit 1b76c3b
Show file tree
Hide file tree
Showing 43 changed files with 2,397 additions and 519 deletions.
43 changes: 41 additions & 2 deletions .github/workflows/testflows-sink-connector-kafka.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
name: Kafka - TestFlows Tests
run-name: ${{ inputs.custom_run_name || 'Kafka - TestFlows Tests' }}

on:
workflow_call:
Expand All @@ -7,6 +8,14 @@ on:
description: "Kafka connector docker image"
required: true
type: string
package:
description: "Package either 'docker://' or 'https://'. Example: 'https://s3.amazonaws.com/clickhouse-builds/23.3/.../package_release/clickhouse-common-static_23.3.1.64_amd64.deb', or 'docker://altinity/clickhouse-server:23.8.8'"
type: string
default: docker://clickhouse/clickhouse-server:23.3
output_format:
description: "Testflows output style."
type: string
default: nice-new-fails
secrets:
DOCKERHUB_USERNAME:
required: false
Expand All @@ -22,7 +31,37 @@ on:
description: "Kafka connector docker image"
required: true
type: string

package:
description: "Package either 'docker://' or 'https://'. Example: 'https://s3.amazonaws.com/clickhouse-builds/23.3/.../package_release/clickhouse-common-static_23.3.1.64_amd64.deb', or 'docker://altinity/clickhouse-server:23.8.8'"
type: string
default: docker://clickhouse/clickhouse-server:23.3
extra_args:
description: "Specific Suite To Run (Default * to run everything)."
required: false
type: string
custom_run_name:
description: 'Custom run name (optional)'
required: false
output_format:
description: "Testflows output style."
type: choice
options:
- nice-new-fails
- brisk-new-fails
- plain-new-fails
- pnice-new-fails
- new-fails
- classic
- nice
- fails
- slick
- brisk
- quiet
- short
- manual
- dots
- progress
- raw
env:
SINK_CONNECTOR_IMAGE: ${{ inputs.SINK_CONNECTOR_IMAGE }}

Expand Down Expand Up @@ -75,7 +114,7 @@ jobs:

- name: Run testflows tests
working-directory: sink-connector/tests/integration
run: python3 -u regression.py --only "/mysql to clickhouse replication/*" --clickhouse-binary-path=docker://clickhouse/clickhouse-server:22.8 --test-to-end -o classic --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="altinity/clickhouse-sink-connector:${SINK_CONNECTOR_VERSION}" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log
run: python3 -u regression.py --only "/regression/${{ inputs.extra_args != '' && inputs.extra_args || '*' }}" --clickhouse-binary-path="${{inputs.package}}" --test-to-end --output ${{ inputs.output_format }} --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="altinity/clickhouse-sink-connector:${SINK_CONNECTOR_VERSION}" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log

- name: Create tfs results report
if: always()
Expand Down
28 changes: 26 additions & 2 deletions .github/workflows/testflows-sink-connector-lightweight.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@ on:
description: "Package either 'docker://' or 'https://'. Example: 'https://s3.amazonaws.com/clickhouse-builds/23.3/.../package_release/clickhouse-common-static_23.3.1.64_amd64.deb', or 'docker://altinity/clickhouse-server:23.8.8'"
type: string
default: docker://clickhouse/clickhouse-server:23.3
output_format:
description: "Testflows output style."
type: string
default: nice-new-fails
secrets:
DOCKERHUB_USERNAME:
required: false
Expand All @@ -38,6 +42,26 @@ on:
custom_run_name:
description: 'Custom run name (optional)'
required: false
output_format:
description: "Testflows output style."
type: choice
options:
- nice-new-fails
- brisk-new-fails
- plain-new-fails
- pnice-new-fails
- new-fails
- classic
- nice
- fails
- slick
- brisk
- quiet
- short
- manual
- dots
- progress
- raw

env:
SINK_CONNECTOR_IMAGE: ${{ inputs.SINK_CONNECTOR_IMAGE }}
Expand Down Expand Up @@ -91,7 +115,7 @@ jobs:

- name: Run testflows tests
working-directory: sink-connector-lightweight/tests/integration
run: python3 -u regression.py --only "/mysql to clickhouse replication/auto table creation/${{ inputs.extra_args != '' && inputs.extra_args || '*' }}" --clickhouse-binary-path="${{inputs.package}}" --test-to-end -o classic --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="registry.gitlab.com/altinity-public/container-images/clickhouse_debezium_embedded:latest" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log
run: python3 -u regression.py --only "/mysql to clickhouse replication/auto table creation/${{ inputs.extra_args != '' && inputs.extra_args || '*' }}" --clickhouse-binary-path="${{inputs.package}}" --test-to-end --output ${{ inputs.output_format }} --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="registry.gitlab.com/altinity-public/container-images/clickhouse_debezium_embedded:latest" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log

- name: Create tfs results report
if: always()
Expand Down Expand Up @@ -169,7 +193,7 @@ jobs:

- name: Run testflows tests
working-directory: sink-connector-lightweight/tests/integration
run: python3 -u regression.py --only "/mysql to clickhouse replication/auto replicated table creation/${{ inputs.extra_args != '' && inputs.extra_args || '*' }}" --clickhouse-binary-path="${{inputs.package}}" --test-to-end -o classic --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="registry.gitlab.com/altinity-public/container-images/clickhouse_debezium_embedded:latest" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log
run: python3 -u regression.py --only "/mysql to clickhouse replication/auto replicated table creation/${{ inputs.extra_args != '' && inputs.extra_args || '*' }}" --clickhouse-binary-path="${{inputs.package}}" --test-to-end --output ${{ inputs.output_format }} --collect-service-logs --attr project="${GITHUB_REPOSITORY}" project.id="$GITHUB_RUN_NUMBER" user.name="$GITHUB_ACTOR" github_actions_run="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" sink_version="registry.gitlab.com/altinity-public/container-images/clickhouse_debezium_embedded:latest" s3_url="https://altinity-test-reports.s3.amazonaws.com/index.html#altinity-sink-connector/testflows/${{ steps.date.outputs.date }}_${{github.run.number}}/" --log logs/raw.log

- name: Create tfs results report
if: always()
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
testflows==2.1.5
testflows==2.4.10
python-dateutil==2.9.0
numpy==1.26.4
pyarrow==16.1.0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@ def create_table_with_is_deleted(
clickhouse_node = self.context.clickhouse_node

if not backticks:
columns = "col1 varchar(255), col2 int, "
columns = r"col1 varchar(255), col2 int, "
else:
columns = "\`col1\` varchar(255), \`col2\` int, "
columns = r"\`col1\` varchar(255), \`col2\` int, "

with By(
f"creating a {table_name} table with is_deleted column and {datatype} datatype"
Expand Down Expand Up @@ -196,7 +196,7 @@ def column_with_is_deleted_backticks(self):
table_name = "tb_" + getuid()

with Given(f"I create the {table_name} table and populate it with data"):
create_table_with_is_deleted(table_name=table_name, column="\`is_deleted\`")
create_table_with_is_deleted(table_name=table_name, column=r"\`is_deleted\`")

with Then("I check that the data was inserted correctly into the ClickHouse table"):
for retry in retries(timeout=40, delay=1):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -641,10 +641,10 @@ def different_database_names(self):
def different_database_names_with_source_backticks(self):
"""Check that the tables are replicated when we have source and destination databases with different names and source database name contains backticks."""
database_map = {
"\`mysql1\`": "ch1",
"\`mysql2\`": "ch2",
"\`mysql3\`": "ch3",
"\`mysql4\`": "ch4",
r"\`mysql1\`": r"ch1",
r"\`mysql2\`": r"ch2",
r"\`mysql3\`": r"ch3",
r"\`mysql4\`": r"ch4",
}
check_different_database_names(database_map=database_map)

Expand All @@ -653,10 +653,10 @@ def different_database_names_with_source_backticks(self):
def different_database_names_with_destination_backticks(self):
"""Check that the tables are replicated when we have source and destination databases with different names and destination database name contains backticks."""
database_map = {
"mysql1": "\`ch1\`",
"mysql2": "\`ch2\`",
"mysql3": "\`ch3\`",
"mysql4": "\`ch4\`",
r"mysql1": r"\`ch1\`",
r"mysql2": r"\`ch2\`",
r"mysql3": r"\`ch3\`",
r"mysql4": r"\`ch4\`",
}
check_different_database_names(database_map=database_map)

Expand All @@ -665,10 +665,10 @@ def different_database_names_with_destination_backticks(self):
def different_database_names_with_backticks(self):
"""Check that the tables are replicated when we have source and destination databases with the same names and they contain backticks."""
database_map = {
"\`mysql1\`": "\`ch1\`",
"\`mysql2\`": "\`ch2\`",
"\`mysql3\`": "\`ch3\`",
"\`mysql4\`": "\`ch4\`",
r"\`mysql1\`": r"\`ch1\`",
r"\`mysql2\`": r"\`ch2\`",
r"\`mysql3\`": r"\`ch3\`",
r"\`mysql4\`": r"\`ch4\`",
}
check_different_database_names(database_map=database_map)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ def insert(self, table_name, values, node=None, database_name=None):
node = self.context.cluster.node("mysql-master")

with When("I insert data into MySQL table"):
node.query(f"INSERT INTO {database_name}.\`{table_name}\` VALUES ({values});")
node.query(rf"INSERT INTO {database_name}.\`{table_name}\` VALUES ({values});")


@TestStep(Given)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
version: "2.3"

services:
clickhouse-sink-connector-kafka:
hostname: clickhouse-sink-connector-kafka
image: ${SINK_CONNECTOR_IMAGE}
restart: "no"
expose:
- "8083"
- "5005"
- "39999"
environment:
- BOOTSTRAP_SERVERS=kafka:9092
- GROUP_ID=2
- CONFIG_STORAGE_TOPIC=config-storage-topic-sink
- OFFSET_STORAGE_TOPIC=offset-storage-topic-sink
- STATUS_STORAGE_TOPIC=status-storage-topic-sink
- LOG_LEVEL=INFO
- JAVA_DEBUG_PORT=*:5005
- DEFAULT_JAVA_DEBUG_PORT=*:5005
- KAFKA_DEBUG=true
- JMX_PORT=39999
23 changes: 23 additions & 0 deletions sink-connector/tests/integration/env/debezium-service.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
version: "2.3"

services:
debezium:
container_name: debezium
hostname: debezium
build:
context: ../../../docker/debezium_jmx
args:
DEBEZIUM_VERSION: 2.1.0.Alpha1
restart: "no"
expose:
- "8083"
- "1976"
environment:
- BOOTSTRAP_SERVERS=kafka:9092
- GROUP_ID=1
- CONFIG_STORAGE_TOPIC=config-storage-topic-debezium
- OFFSET_STORAGE_TOPIC=offset-storage-topic-debezium
- STATUS_STORAGE_TOPIC=status-storage-topic-debezium
- LOG_LEVEL=INFO
- KEY_CONVERTER=io.confluent.connect.avro.AvroConverter
- VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter
98 changes: 17 additions & 81 deletions sink-connector/tests/integration/env/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,26 +3,9 @@ version: "2.3"

services:
mysql-master:
container_name: mysql-master
image: docker.io/bitnami/mysql:8.0.36
restart: "no"
expose:
- "3306"
environment:
- MYSQL_ROOT_PASSWORD=root
- MYSQL_DATABASE=test
- MYSQL_REPLICATION_MODE=master
- MYSQL_REPLICATION_USER=repl_user
- ALLOW_EMPTY_PASSWORD=yes
volumes:
- ./mysqld.cnf:/opt/bitnami/mysql/conf/my_custom.cnf
- ../sql/init_mysql.sql:/docker-entrypoint-initdb.d/init_mysql.sql
- "${CLICKHOUSE_TESTS_DIR}/_instances/share_folder:/tmp/share_folder"
healthcheck:
test: [ 'CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh' ]
interval: 15s
timeout: 5s
retries: 6
extends:
file: mysql-master-service.yml
service: mysql-master


schemaregistry:
Expand All @@ -36,70 +19,25 @@ services:
- SCHEMA_REGISTRY_HOST_NAME=schemaregistry
- SCHEMA_REGISTRY_LISTENERS=http://schemaregistry:8081
- SCHEMA_REGISTRY_DEBUG=true

depends_on:
- kafka

debezium:
container_name: debezium
hostname: debezium
# image: debezium/connect:1.9.5.Final
build:
context: ../../../docker/debezium_jmx
args:
DEBEZIUM_VERSION: 2.1.0.Alpha1
restart: "no"
expose:
- "8083"
- "1976"
environment:
- BOOTSTRAP_SERVERS=kafka:9092
- GROUP_ID=1
- CONFIG_STORAGE_TOPIC=config-storage-topic-debezium
- OFFSET_STORAGE_TOPIC=offset-storage-topic-debezium
- STATUS_STORAGE_TOPIC=status-storage-topic-debezium
- LOG_LEVEL=INFO
- KEY_CONVERTER=io.confluent.connect.avro.AvroConverter
- VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter
extends:
file: debezium-service.yml
service: debezium
depends_on:
- kafka

kafka:
container_name: kafka
hostname: kafka
image: vectorized/redpanda
restart: "no"
expose:
- "19092"
command:
- redpanda
- start
- --overprovisioned
- --kafka-addr
- DOCKER_NETWORK://0.0.0.0:9092,LOCALHOST_NETWORK://0.0.0.0:19092
- --advertise-kafka-addr
- DOCKER_NETWORK://kafka:9092,LOCALHOST_NETWORK://127.0.0.1:19092

sink:
container_name: sink
hostname: sink
image: ${SINK_CONNECTOR_IMAGE}
restart: "no"
expose:
- "8083"
- "5005"
- "39999"
environment:
- BOOTSTRAP_SERVERS=kafka:9092
- GROUP_ID=2
- CONFIG_STORAGE_TOPIC=config-storage-topic-sink
- OFFSET_STORAGE_TOPIC=offset-storage-topic-sink
- STATUS_STORAGE_TOPIC=status-storage-topic-sink
- LOG_LEVEL=INFO
- JAVA_DEBUG_PORT=*:5005
- DEFAULT_JAVA_DEBUG_PORT=*:5005
- KAFKA_DEBUG=true
- JMX_PORT=39999
extends:
file: kafka-service.yml
service: kafka

clickhouse-sink-connector-kafka:
extends:
file: clickhouse-sink-connector-kafka-service.yml
service: clickhouse-sink-connector-kafka
depends_on:
- kafka

Expand All @@ -114,10 +52,6 @@ services:
file: clickhouse-service.yml
service: clickhouse
hostname: clickhouse
# environment:
# - CLICKHOUSE_USER=1000
# - CLICKHOUSE_PASSWORD=1000
# - CLICKHOUSE_DB=test
ulimits:
nofile:
soft: 262144
Expand Down Expand Up @@ -186,7 +120,7 @@ services:
condition: service_healthy

# dummy service which does nothing, but allows to postpone
# 'docker-compose up -d' till all dependecies will go healthy
# 'docker-compose up -d' till all dependencies will go healthy
all_services_ready:
image: hello-world
depends_on:
Expand All @@ -200,4 +134,6 @@ services:
condition: service_healthy
zookeeper:
condition: service_healthy
mysql-master:
condition: service_healthy

Loading

0 comments on commit 1b76c3b

Please sign in to comment.