From 447a1ae75f13effa06f8f92d56192e3f03b974d0 Mon Sep 17 00:00:00 2001
From: mchades
Date: Fri, 9 Aug 2024 14:45:46 +0800
Subject: [PATCH] reuse hive docker container in CI

---
 .../workflows/backend-integration-test.yml    |  24 ++-
 .github/workflows/build.yml                   | 142 ------------------
 .github/workflows/flink-integration-test.yml  | 104 -------------
 .github/workflows/python-integration-test.yml |  86 -----------
 .github/workflows/spark-integration-test.yml  | 110 --------------
 .../integration/test/RangerHiveE2EIT.java     |   2 +
 .../integration/test/HDFSKerberosIT.java      |   6 +-
 .../test/HadoopUserAuthenticationIT.java      |  41 ++---
 .../test/HiveUserAuthenticationIT.java        |  20 +--
 .../test/CatalogIcebergKerberosHiveIT.java    |  19 +--
 .../test/TestMultipleJDBCLoad.java            |   9 +-
 .../CatalogPaimonKerberosFilesystemIT.java    |  18 +--
 .../apache_gravitino.egg-info/PKG-INFO        |  48 ++++++
 .../apache_gravitino.egg-info/SOURCES.txt     | 101 +++++++++++++
 .../dependency_links.txt                      |   1 +
 .../apache_gravitino.egg-info/requires.txt    |  22 +++
 .../apache_gravitino.egg-info/top_level.txt   |   1 +
 .../IcebergRestKerberosHiveCatalogIT.java     |   7 +-
 .../docker-script/docker-compose.yaml         |  93 ++++++++++++
 .../docker-script/init/hive/init.sh           |   5 +
 .../docker-script/launch.sh                   |  66 +++++---
 .../test/container/BaseContainer.java         |  14 +-
 .../test/container/HiveContainer.java         | 101 ++++++++++++-
 .../test/container/KafkaContainer.java        |   6 +-
 .../test/container/RangerContainer.java       |  15 ++
 .../test/container/TrinoITContainers.java     | 134 +++++++++++++++--
 .../integration/test/util/AbstractIT.java     |  34 +++--
 integration-test/trino-it/init/ranger/init.sh |  22 +++
 .../integration/test/TrinoQueryITBase.java    |   2 +-
 .../test/web/ui/CatalogsPageTest.java         |   5 +-
 30 files changed, 684 insertions(+), 574 deletions(-)
 delete mode 100644 .github/workflows/build.yml
 delete mode 100644 .github/workflows/flink-integration-test.yml
 delete mode 100644 .github/workflows/python-integration-test.yml
 delete mode 100644 .github/workflows/spark-integration-test.yml
 create mode 100644 clients/client-python/apache_gravitino.egg-info/PKG-INFO
 create mode 100644 clients/client-python/apache_gravitino.egg-info/SOURCES.txt
 create mode 100644 clients/client-python/apache_gravitino.egg-info/dependency_links.txt
 create mode 100644 clients/client-python/apache_gravitino.egg-info/requires.txt
 create mode 100644 clients/client-python/apache_gravitino.egg-info/top_level.txt
 create mode 100644 integration-test/trino-it/init/ranger/init.sh

diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml
index cd4d698cc13..9956f26766a 100644
--- a/.github/workflows/backend-integration-test.yml
+++ b/.github/workflows/backend-integration-test.yml
@@ -164,11 +164,33 @@ jobs:
       - name: Free up disk space
         run: |
           dev/ci/util_free_space.sh
+
+
+      # - name: Setup debug Github Action
+      #   if: ${{ contains(github.event.pull_request.labels.*.name, 'debug action') }}
+      #   uses: csexton/debugger-action@master
+
+      - name: Setup reused hive services
+        run: |
+          free -h
+          docker compose -f integration-test/trino-it/docker-compose.yaml up -d hive hive_with_kerberos || exit_code=$?
+          if [ "${exit_code:-0}" 
-ne 0 ]; then + docker compose integration-test/trino-it/docker-compose.yaml ps + docker compose integration-test/trino-it/docker-compose.yaml logs + + exit 1 + else + echo "Services started successfully" + fi + + - name: check services status + run: | + docker compose -f integration-test/trino-it/docker-compose.yaml ps - name: Backend Integration Test (JDK${{ matrix.java-version }}-${{ matrix.test-mode }}-${{ matrix.backend }}) id: integrationTest run: > - ./gradlew test -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -PjdbcBackend=${{ matrix.backend }} -PskipWebITs -PskipDockerTests=false + export ACTIVE_CI=true && ./gradlew test -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -PjdbcBackend=${{ matrix.backend }} -PskipWebITs -PskipDockerTests=false -x :web:test -x :clients:client-python:test -x :flink-connector:test -x :spark-connector:test -x :spark-connector:spark-common:test -x :spark-connector:spark-3.3:test -x :spark-connector:spark-3.4:test -x :spark-connector:spark-3.5:test -x :spark-connector:spark-runtime-3.3:test -x :spark-connector:spark-runtime-3.4:test -x :spark-connector:spark-runtime-3.5:test diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 0cf17683a0d..00000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,142 +0,0 @@ -name: build - -# Controls when the workflow will run -on: - # Triggers the workflow on push or pull request events but only for the "main" branch - push: - branches: [ "main", "branch-*" ] - pull_request: - branches: [ "main", "branch-*" ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - changes: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - source_changes: - - .github/** - - api/** - - bin/** - - catalogs/** - - clients/client-java/** - - clients/client-java-runtime/** - - clients/filesystem-hadoop3/** - - clients/filesystem-hadoop3-runtime/** - - common/** - - conf/** - - core/** - - dev/** - - gradle/** - - integration-test/** - - meta/** - - server/** - - server-common/** - - spark-connector/** - - flink-connector/** - - trino-connector/** - - web/** - - docs/open-api/** - - build.gradle.kts - - gradle.properties - - gradlew - - setting.gradle.kts - outputs: - source_changes: ${{ steps.filter.outputs.source_changes }} - - compile-check: - runs-on: ubuntu-latest - needs: changes - if: needs.changes.outputs.source_changes != 'true' - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: 8 - distribution: 'temurin' - cache: 'gradle' - - - name: Build with Gradle - run: ./gradlew build -x test -PjdkVersion=8 - - # To check the spark-connector is compatible with scala2.13 - spark-connector-build: - runs-on: ubuntu-latest - timeout-minutes: 30 - needs: changes - if: needs.changes.outputs.source_changes == 'true' - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: 8 - distribution: 'temurin' - cache: 'gradle' - - - name: Free up disk space - run: | - dev/ci/util_free_space.sh - - - name: Build with Scala2.13 - run: | - ./gradlew :spark-connector:spark-3.4:build -PscalaVersion=2.13 -PskipITs -PskipDockerTests=false - ./gradlew :spark-connector:spark-3.5:build 
-PscalaVersion=2.13 -PskipITs -PskipDockerTests=false - - - name: Upload unit tests report - uses: actions/upload-artifact@v3 - if: failure() - with: - name: unit test report - path: | - build/reports - spark-connector/**/*.log - - build: - # The type of runner that the job will run on - runs-on: ubuntu-latest - strategy: - matrix: - java-version: [ 8, 11, 17 ] - timeout-minutes: 30 - needs: changes - if: needs.changes.outputs.source_changes == 'true' - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: ${{ matrix.java-version }} - distribution: 'temurin' - cache: 'gradle' - - - name: Test publish to local - run: ./gradlew publishToMavenLocal -x test -PjdkVersion=${{ matrix.java-version }} - - - name: Free up disk space - run: | - dev/ci/util_free_space.sh - - - name: Build with Gradle - run: ./gradlew build -PskipITs -PjdkVersion=${{ matrix.java-version }} -PskipDockerTests=false -x :clients:client-python:build - - - name: Upload unit tests report - uses: actions/upload-artifact@v3 - if: failure() - with: - name: unit test report - path: | - build/reports - catalogs/**/*.log - catalogs/**/*.tar diff --git a/.github/workflows/flink-integration-test.yml b/.github/workflows/flink-integration-test.yml deleted file mode 100644 index c59c0fd23f7..00000000000 --- a/.github/workflows/flink-integration-test.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Flink Integration Test - -# Controls when the workflow will run -on: - # Triggers the workflow on push or pull request events but only for the "main" branch - push: - branches: [ "main", "branch-*" ] - pull_request: - branches: [ "main", "branch-*" ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - changes: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - source_changes: - - .github/** - - api/** - - bin/** - - catalogs/** - - clients/client-java/** - - clients/client-java-runtime/** - - clients/filesystem-hadoop3/** - - clients/filesystem-hadoop3-runtime/** - - common/** - - conf/** - - core/** - - dev/** - - gradle/** - - meta/** - - server/** - - server-common/** - - flink-connector/** - - docs/open-api/** - - build.gradle.kts - - gradle.properties - - gradlew - - setting.gradle.kts - outputs: - source_changes: ${{ steps.filter.outputs.source_changes }} - - # Integration test for AMD64 architecture - test-amd64-arch: - needs: changes - if: needs.changes.outputs.source_changes == 'true' - runs-on: ubuntu-latest - timeout-minutes: 30 - strategy: - matrix: - architecture: [linux/amd64] - java-version: [ 8, 11, 17 ] - env: - PLATFORM: ${{ matrix.architecture }} - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: ${{ matrix.java-version }} - distribution: 'temurin' - cache: 'gradle' - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Check required command - run: | - dev/ci/check_commands.sh - - - name: Package Gravitino - run: | - ./gradlew compileDistribution -x test -PjdkVersion=${{ matrix.java-version }} - - - name: Free up disk space - run: | - dev/ci/util_free_space.sh - - - name: Flink Integration Test - id: integrationTest - run: | - ./gradlew -PskipTests -PtestMode=embedded -PjdkVersion=${{ 
matrix.java-version }} -PskipDockerTests=false :flink-connector:test --tests "org.apache.gravitino.flink.connector.integration.test.**" - ./gradlew -PskipTests -PtestMode=deploy -PjdkVersion=${{ matrix.java-version }} -PskipDockerTests=false :flink-connector:test --tests "org.apache.gravitino.flink.connector.integration.test.**" - - - name: Upload integrate tests reports - uses: actions/upload-artifact@v3 - if: ${{ (failure() && steps.integrationTest.outcome == 'failure') || contains(github.event.pull_request.labels.*.name, 'upload log') }} - with: - name: flink-connector-integrate-test-reports-${{ matrix.java-version }} - path: | - build/reports - flink-connector/build/flink-connector-integration-test.log - flink-connector/build/*.tar - distribution/package/logs/gravitino-server.out - distribution/package/logs/gravitino-server.log - catalogs/**/*.log - catalogs/**/*.tar \ No newline at end of file diff --git a/.github/workflows/python-integration-test.yml b/.github/workflows/python-integration-test.yml deleted file mode 100644 index 4e9f96bc690..00000000000 --- a/.github/workflows/python-integration-test.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Python Client Integration Test - -# Controls when the workflow will run -on: - # Triggers the workflow on push or pull request events but only for the "main" branch - push: - branches: [ "main", "branch-*" ] - pull_request: - branches: [ "main", "branch-*" ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - changes: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - source_changes: - - .github/** - - api/** - - catalogs/catalog-hadoop/** - - clients/client-python/** - - common/** - - conf/** - - core/** - - meta/** - - server/** - - server-common/** - outputs: - source_changes: ${{ steps.filter.outputs.source_changes }} - - # Integration test for AMD64 architecture - test-amd64-arch: - needs: changes - if: needs.changes.outputs.source_changes == 'true' - runs-on: ubuntu-latest - timeout-minutes: 30 - strategy: - matrix: - architecture: [linux/amd64] - java-version: [ 8 ] - env: - PLATFORM: ${{ matrix.architecture }} - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: ${{ matrix.java-version }} - distribution: 'temurin' - cache: 'gradle' - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Python Client Integration Test - id: integrationTest - run: | - ./gradlew compileDistribution -x test -PjdkVersion=${{ matrix.java-version }} - - for pythonVersion in "3.8" "3.9" "3.10" "3.11" - do - echo "Use Python version ${pythonVersion} to test the Python client." 
- ./gradlew -PjdkVersion=${{ matrix.java-version }} -PpythonVersion=${pythonVersion} -PskipDockerTests=false :clients:client-python:test - # Clean Gravitino database to clean test data - rm -rf ./distribution/package/data - done - - - name: Upload integrate tests reports - uses: actions/upload-artifact@v3 - if: ${{ failure() && steps.integrationTest.outcome == 'failure' }} - with: - name: integrate test reports - path: | - build/reports - integration-test/build/integration-test.log - distribution/package/logs/gravitino-server.out - distribution/package/logs/gravitino-server.log - catalogs/**/*.log - catalogs/**/*.tar \ No newline at end of file diff --git a/.github/workflows/spark-integration-test.yml b/.github/workflows/spark-integration-test.yml deleted file mode 100644 index a2f11061cda..00000000000 --- a/.github/workflows/spark-integration-test.yml +++ /dev/null @@ -1,110 +0,0 @@ -name: Spark Integration Test - -# Controls when the workflow will run -on: - # Triggers the workflow on push or pull request events but only for the "main" branch - push: - branches: [ "main", "branch-*" ] - pull_request: - branches: [ "main", "branch-*" ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - changes: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - source_changes: - - .github/** - - api/** - - bin/** - - catalogs/** - - clients/client-java/** - - clients/client-java-runtime/** - - clients/filesystem-hadoop3/** - - clients/filesystem-hadoop3-runtime/** - - common/** - - conf/** - - core/** - - dev/** - - gradle/** - - iceberg/** - - meta/** - - server/** - - server-common/** - - spark-connector/** - - docs/open-api/** - - build.gradle.kts - - gradle.properties - - gradlew - - setting.gradle.kts - outputs: - source_changes: ${{ steps.filter.outputs.source_changes }} - - # Integration test for AMD64 architecture - test-amd64-arch: - needs: changes - if: needs.changes.outputs.source_changes == 'true' - runs-on: ubuntu-latest - timeout-minutes: 90 - strategy: - matrix: - architecture: [linux/amd64] - java-version: [ 8, 11, 17 ] - scala-version: [ 2.12 ] - test-mode: [ embedded, deploy ] - env: - PLATFORM: ${{ matrix.architecture }} - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-java@v4 - with: - java-version: ${{ matrix.java-version }} - distribution: 'temurin' - cache: 'gradle' - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Check required command - run: | - dev/ci/check_commands.sh - - - name: Package Gravitino - if : ${{ matrix.test-mode == 'deploy' }} - run: | - ./gradlew compileDistribution -x test -PjdkVersion=${{ matrix.java-version }} - - - name: Free up disk space - run: | - dev/ci/util_free_space.sh - - - name: Spark Integration Test - id: integrationTest - run: | - if [ "${{ matrix.scala-version }}" == "2.12" ];then - ./gradlew -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -PscalaVersion=${{ matrix.scala-version }} -PskipDockerTests=false :spark-connector:spark-3.3:test --tests "org.apache.gravitino.spark.connector.integration.test.**" - fi - ./gradlew -PskipTests -PtestMode=${{ matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -PscalaVersion=${{ matrix.scala-version }} -PskipDockerTests=false :spark-connector:spark-3.4:test --tests "org.apache.gravitino.spark.connector.integration.test.**" - ./gradlew -PskipTests -PtestMode=${{ 
matrix.test-mode }} -PjdkVersion=${{ matrix.java-version }} -PscalaVersion=${{ matrix.scala-version }} -PskipDockerTests=false :spark-connector:spark-3.5:test --tests "org.apache.gravitino.spark.connector.integration.test.**" - - - name: Upload integrate tests reports - uses: actions/upload-artifact@v3 - if: ${{ (failure() && steps.integrationTest.outcome == 'failure') || contains(github.event.pull_request.labels.*.name, 'upload log') }} - with: - name: spark-connector-integrate-test-reports-${{ matrix.java-version }}-${{ matrix.test-mode }} - path: | - build/reports - spark-connector/v3.3/spark/build/spark-3.3-integration-test.log - spark-connector/v3.4/spark/build/spark-3.4-integration-test.log - spark-connector/v3.5/spark/build/spark-3.5-integration-test.log - distribution/package/logs/*.out - distribution/package/logs/*.log diff --git a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java index 89ecbc849ad..e1a431fbf44 100644 --- a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java +++ b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java @@ -46,6 +46,7 @@ import org.apache.gravitino.catalog.hive.HiveConstants; import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.connector.AuthorizationPropertiesMeta; +import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.HiveContainer; import org.apache.gravitino.integration.test.container.RangerContainer; import org.apache.gravitino.integration.test.util.AbstractIT; @@ -73,6 +74,7 @@ @Tag("gravitino-docker-test") public class RangerHiveE2EIT extends AbstractIT { private static final Logger LOG = LoggerFactory.getLogger(RangerHiveE2EIT.class); + private static final ContainerSuite containerSuite = ContainerSuite.getInstance(); private static RangerAuthorizationPlugin rangerAuthPlugin; public static final String metalakeName = diff --git a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HDFSKerberosIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HDFSKerberosIT.java index acf1ab64612..2717ff401e6 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HDFSKerberosIT.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HDFSKerberosIT.java @@ -63,15 +63,11 @@ public static void setup() throws IOException { keytabPath = file.getAbsolutePath() + "/client.keytab"; containerSuite .getKerberosHiveContainer() - .getContainer() .copyFileFromContainer("/etc/admin.keytab", keytabPath); String krb5TmpPath = file.getAbsolutePath() + "/krb5.conf_tmp"; String krb5Path = file.getAbsolutePath() + "/krb5.conf"; - containerSuite - .getKerberosHiveContainer() - .getContainer() - .copyFileFromContainer("/etc/krb5.conf", krb5TmpPath); + containerSuite.getKerberosHiveContainer().copyFileFromContainer("/etc/krb5.conf", krb5TmpPath); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); diff --git 
a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java index 7a56f8503a3..710a7521be1 100644 --- a/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java +++ b/catalogs/catalog-hadoop/src/test/java/org/apache/gravitino/catalog/hadoop/integration/test/HadoopUserAuthenticationIT.java @@ -137,25 +137,22 @@ public static void stop() { private static void prepareKerberosConfig() throws IOException, KrbException { // Keytab of the Gravitino SDK client - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); createKeyTableForSchemaAndFileset(); // Keytab of the Gravitino server - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); // Keytab of Gravitino server to connector to HDFS - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/etc/admin.keytab", TMP_DIR + HADOOP_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/etc/admin.keytab", TMP_DIR + HADOOP_CLIENT_KEYTAB); String tmpKrb5Path = TMP_DIR + "krb5.conf_tmp"; String krb5Path = TMP_DIR + "krb5.conf"; - kerberosHiveContainer.getContainer().copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); + kerberosHiveContainer.copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); @@ -190,10 +187,8 @@ private static void createKeyTableForSchemaAndFileset() throws IOException { HADOOP_SCHEMA_PRINCIPAL, REALM), StandardCharsets.UTF_8); - kerberosHiveContainer - .getContainer() - .copyFileToContainer( - MountableFile.forHostPath(TMP_DIR + createSchemaShellFile), createSchemaShellFile); + kerberosHiveContainer.copyFileToContainer( + MountableFile.forHostPath(TMP_DIR + createSchemaShellFile), createSchemaShellFile); kerberosHiveContainer.executeInContainer("bash", createSchemaShellFile); FileUtils.writeStringToFile( @@ -208,18 +203,14 @@ private static void createKeyTableForSchemaAndFileset() throws IOException { HADOOP_FILESET_PRINCIPAL, REALM), StandardCharsets.UTF_8); - kerberosHiveContainer - .getContainer() - .copyFileToContainer( - MountableFile.forHostPath(TMP_DIR + createFileSetShellFile), createFileSetShellFile); + kerberosHiveContainer.copyFileToContainer( + MountableFile.forHostPath(TMP_DIR + createFileSetShellFile), createFileSetShellFile); kerberosHiveContainer.executeInContainer("bash", createFileSetShellFile); - kerberosHiveContainer - .getContainer() - .copyFileFromContainer(HADOOP_SCHEMA_KEYTAB, TMP_DIR + HADOOP_SCHEMA_KEYTAB); - kerberosHiveContainer - .getContainer() - .copyFileFromContainer(HADOOP_FILESET_KEYTAB, TMP_DIR + HADOOP_FILESET_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + HADOOP_SCHEMA_KEYTAB, TMP_DIR + HADOOP_SCHEMA_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + HADOOP_FILESET_KEYTAB, TMP_DIR + HADOOP_FILESET_KEYTAB); } private static void addKerberosConfig() { @@ -256,7 +247,7 @@ public 
void testUserAuthentication() { properties.put(PRINCIPAL_KEY, HADOOP_CLIENT_PRINCIPAL); properties.put("location", HDFS_URL + "/user/hadoop/"); - kerberosHiveContainer.executeInContainer("hadoop", "fs", "-mkdir", "/user/hadoop"); + kerberosHiveContainer.executeInContainer("hadoop", "fs", "-mkdir", "-p", "/user/hadoop"); Catalog catalog = gravitinoMetalake.createCatalog( diff --git a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java index a4d982e30b8..a7243c32bea 100644 --- a/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java +++ b/catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/HiveUserAuthenticationIT.java @@ -137,23 +137,20 @@ public static void stop() { private static void prepareKerberosConfig() throws Exception { // Keytab of the Gravitino SDK client - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); // Keytab of the Gravitino server - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); // Keytab of Gravitino server to connector to Hive - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/etc/admin.keytab", TMP_DIR + HIVE_METASTORE_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/etc/admin.keytab", TMP_DIR + HIVE_METASTORE_CLIENT_KEYTAB); String tmpKrb5Path = TMP_DIR + "/krb5.conf_tmp"; String krb5Path = TMP_DIR + "/krb5.conf"; - kerberosHiveContainer.getContainer().copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); + kerberosHiveContainer.copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); @@ -235,6 +232,9 @@ public void testUserAuthentication() { Assertions.assertTrue( exceptionMessage.contains("Permission denied: user=gravitino_client, access=WRITE")); + // make sure the directory exists + kerberosHiveContainer.executeInContainer( + "hadoop", "fs", "-mkdir", "-p", "/user/hive/warehouse"); // Now try to give the user the permission to create schema again kerberosHiveContainer.executeInContainer( "hadoop", "fs", "-chmod", "-R", "777", "/user/hive/warehouse"); diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java index ebd8737f550..f5450c092ef 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/CatalogIcebergKerberosHiveIT.java @@ -150,23 +150,20 @@ public static void stop() { private static void prepareKerberosConfig() throws Exception { // Keytab of the 
Gravitino SDK client - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); // Keytab of the Gravitino server - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); // Keytab of Gravitino server to connector to Hive - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/etc/admin.keytab", TMP_DIR + HIVE_METASTORE_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/etc/admin.keytab", TMP_DIR + HIVE_METASTORE_CLIENT_KEYTAB); String tmpKrb5Path = TMP_DIR + "/krb5.conf_tmp"; String krb5Path = TMP_DIR + "/krb5.conf"; - kerberosHiveContainer.getContainer().copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); + kerberosHiveContainer.copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); @@ -258,7 +255,7 @@ void testIcebergWithKerberosAndUserImpersonation() throws IOException { // Now try to permit the user to create the schema again kerberosHiveContainer.executeInContainer( - "hadoop", "fs", "-mkdir", "/user/hive/warehouse-catalog-iceberg"); + "hadoop", "fs", "-mkdir", "-p", "/user/hive/warehouse-catalog-iceberg"); kerberosHiveContainer.executeInContainer( "hadoop", "fs", "-chmod", "-R", "777", "/user/hive/warehouse-catalog-iceberg"); Assertions.assertDoesNotThrow( diff --git a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java index 5f78ab57377..55ae9b16ead 100644 --- a/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java +++ b/catalogs/catalog-lakehouse-iceberg/src/test/java/org/apache/gravitino/catalog/lakehouse/iceberg/integration/test/TestMultipleJDBCLoad.java @@ -32,6 +32,7 @@ import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.client.GravitinoMetalake; import org.apache.gravitino.iceberg.common.IcebergConfig; +import org.apache.gravitino.integration.test.container.ContainerSuite; import org.apache.gravitino.integration.test.container.MySQLContainer; import org.apache.gravitino.integration.test.container.PostgreSQLContainer; import org.apache.gravitino.integration.test.util.AbstractIT; @@ -56,10 +57,10 @@ public class TestMultipleJDBCLoad extends AbstractIT { @BeforeAll public static void startup() throws IOException { - containerSuite.startMySQLContainer(TEST_DB_NAME); - mySQLContainer = containerSuite.getMySQLContainer(); - containerSuite.startPostgreSQLContainer(TEST_DB_NAME); - postgreSQLContainer = containerSuite.getPostgreSQLContainer(); + ContainerSuite.getInstance().startMySQLContainer(TEST_DB_NAME); + mySQLContainer = ContainerSuite.getInstance().getMySQLContainer(); + ContainerSuite.getInstance().startPostgreSQLContainer(TEST_DB_NAME); + postgreSQLContainer = ContainerSuite.getInstance().getPostgreSQLContainer(); } @Test diff --git 
a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java index 233c8aff764..c83e720cf88 100644 --- a/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java +++ b/catalogs/catalog-lakehouse-paimon/src/test/java/org/apache/gravitino/catalog/lakehouse/paimon/integration/test/CatalogPaimonKerberosFilesystemIT.java @@ -143,23 +143,19 @@ public static void stop() { private static void prepareKerberosConfig() throws Exception { // Keytab of the Gravitino SDK client - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_client.keytab", TMP_DIR + GRAVITINO_CLIENT_KEYTAB); // Keytab of the Gravitino server - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/gravitino_server.keytab", TMP_DIR + GRAVITINO_SERVER_KEYTAB); // Keytab of Gravitino server to connector to Hive - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/etc/admin.keytab", TMP_DIR + HDFS_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer("/etc/admin.keytab", TMP_DIR + HDFS_CLIENT_KEYTAB); String tmpKrb5Path = TMP_DIR + "/krb5.conf_tmp"; String krb5Path = TMP_DIR + "/krb5.conf"; - kerberosHiveContainer.getContainer().copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); + kerberosHiveContainer.copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); @@ -243,7 +239,7 @@ void testPaimonWithKerberos() { // Now try to permit the user to create the schema again kerberosHiveContainer.executeInContainer( - "hadoop", "fs", "-mkdir", "/user/hive/paimon_catalog_warehouse"); + "hadoop", "fs", "-mkdir", "-p", "/user/hive/paimon_catalog_warehouse"); kerberosHiveContainer.executeInContainer( "hadoop", "fs", "-chmod", "-R", "777", "/user/hive/paimon_catalog_warehouse"); Assertions.assertDoesNotThrow( diff --git a/clients/client-python/apache_gravitino.egg-info/PKG-INFO b/clients/client-python/apache_gravitino.egg-info/PKG-INFO new file mode 100644 index 00000000000..49d333dc02b --- /dev/null +++ b/clients/client-python/apache_gravitino.egg-info/PKG-INFO @@ -0,0 +1,48 @@ +Metadata-Version: 2.1 +Name: apache-gravitino +Version: 0.7.0.dev0 +Summary: Python lib/client for Apache Gravitino +Home-page: https://github.com/apache/gravitino +Author: Apache Software Foundation +Author-email: dev@gravitino.apache.org +Maintainer: Apache Gravitino Community +Maintainer-email: dev@gravitino.apache.org +License: Apache-2.0 +Project-URL: Homepage, https://gravitino.apache.org/ +Project-URL: Source Code, https://github.com/apache/gravitino +Project-URL: Documentation, https://gravitino.apache.org/docs/overview +Project-URL: Bug Tracker, https://github.com/apache/gravitino/issues +Project-URL: Slack Chat, https://the-asf.slack.com/archives/C078RESTT19 +Keywords: Data,AI,metadata,catalog +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS 
Independent +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +Requires-Dist: requests==2.32.2 +Requires-Dist: dataclasses-json==0.6.6 +Requires-Dist: readerwriterlock==1.0.9 +Requires-Dist: fsspec==2024.3.1 +Requires-Dist: pyarrow==15.0.2 +Requires-Dist: cachetools==5.3.3 +Provides-Extra: dev +Requires-Dist: requests==2.32.2; extra == "dev" +Requires-Dist: dataclasses-json==0.6.6; extra == "dev" +Requires-Dist: pylint==3.2.2; extra == "dev" +Requires-Dist: black==24.4.2; extra == "dev" +Requires-Dist: twine==5.1.1; extra == "dev" +Requires-Dist: coverage==7.5.1; extra == "dev" +Requires-Dist: pandas==2.0.3; extra == "dev" +Requires-Dist: pyarrow==15.0.2; extra == "dev" +Requires-Dist: llama-index==0.10.40; extra == "dev" +Requires-Dist: tenacity==8.3.0; extra == "dev" +Requires-Dist: cachetools==5.3.3; extra == "dev" +Requires-Dist: readerwriterlock==1.0.9; extra == "dev" +Requires-Dist: docker==7.1.0; extra == "dev" +Requires-Dist: pyjwt[crypto]==2.8.0; extra == "dev" + +Apache Gravitino Python client diff --git a/clients/client-python/apache_gravitino.egg-info/SOURCES.txt b/clients/client-python/apache_gravitino.egg-info/SOURCES.txt new file mode 100644 index 00000000000..87b75445a70 --- /dev/null +++ b/clients/client-python/apache_gravitino.egg-info/SOURCES.txt @@ -0,0 +1,101 @@ +MANIFEST.in +requirements-dev.txt +requirements.txt +setup.py +apache_gravitino.egg-info/PKG-INFO +apache_gravitino.egg-info/SOURCES.txt +apache_gravitino.egg-info/dependency_links.txt +apache_gravitino.egg-info/requires.txt +apache_gravitino.egg-info/top_level.txt +gravitino/__init__.py +gravitino/name_identifier.py +gravitino/namespace.py +gravitino/typing.py +gravitino/version.ini +gravitino/api/__init__.py +gravitino/api/audit.py +gravitino/api/auditable.py +gravitino/api/catalog.py +gravitino/api/catalog_change.py +gravitino/api/fileset.py +gravitino/api/fileset_change.py +gravitino/api/metalake.py +gravitino/api/metalake_change.py +gravitino/api/schema.py +gravitino/api/schema_change.py +gravitino/api/supports_schemas.py +gravitino/auth/__init__.py +gravitino/auth/auth_constants.py +gravitino/auth/auth_data_provider.py +gravitino/auth/default_oauth2_token_provider.py +gravitino/auth/oauth2_token_provider.py +gravitino/auth/simple_auth_provider.py +gravitino/catalog/__init__.py +gravitino/catalog/base_schema_catalog.py +gravitino/catalog/fileset_catalog.py +gravitino/client/__init__.py +gravitino/client/gravitino_admin_client.py +gravitino/client/gravitino_client.py +gravitino/client/gravitino_client_base.py +gravitino/client/gravitino_metalake.py +gravitino/client/gravitino_version.py +gravitino/constants/__init__.py +gravitino/constants/doc.py +gravitino/constants/error.py +gravitino/constants/root.py +gravitino/constants/timeout.py +gravitino/constants/version.py +gravitino/dto/__init__.py +gravitino/dto/audit_dto.py +gravitino/dto/catalog_dto.py +gravitino/dto/dto_converters.py +gravitino/dto/fileset_dto.py +gravitino/dto/metalake_dto.py +gravitino/dto/schema_dto.py +gravitino/dto/version_dto.py +gravitino/dto/requests/__init__.py +gravitino/dto/requests/catalog_create_request.py +gravitino/dto/requests/catalog_update_request.py +gravitino/dto/requests/catalog_updates_request.py +gravitino/dto/requests/fileset_create_request.py 
+gravitino/dto/requests/fileset_update_request.py +gravitino/dto/requests/fileset_updates_request.py +gravitino/dto/requests/metalake_create_request.py +gravitino/dto/requests/metalake_update_request.py +gravitino/dto/requests/metalake_updates_request.py +gravitino/dto/requests/oauth2_client_credential_request.py +gravitino/dto/requests/schema_create_request.py +gravitino/dto/requests/schema_update_request.py +gravitino/dto/requests/schema_updates_request.py +gravitino/dto/responses/__init__.py +gravitino/dto/responses/base_response.py +gravitino/dto/responses/catalog_list_response.py +gravitino/dto/responses/catalog_response.py +gravitino/dto/responses/drop_response.py +gravitino/dto/responses/entity_list_response.py +gravitino/dto/responses/error_response.py +gravitino/dto/responses/fileset_response.py +gravitino/dto/responses/metalake_list_response.py +gravitino/dto/responses/metalake_response.py +gravitino/dto/responses/oauth2_error_response.py +gravitino/dto/responses/oauth2_token_response.py +gravitino/dto/responses/schema_response.py +gravitino/dto/responses/version_response.py +gravitino/exceptions/__init__.py +gravitino/exceptions/base.py +gravitino/exceptions/handlers/__init__.py +gravitino/exceptions/handlers/catalog_error_handler.py +gravitino/exceptions/handlers/error_handler.py +gravitino/exceptions/handlers/fileset_error_handler.py +gravitino/exceptions/handlers/metalake_error_handler.py +gravitino/exceptions/handlers/oauth_error_handler.py +gravitino/exceptions/handlers/rest_error_handler.py +gravitino/exceptions/handlers/schema_error_handler.py +gravitino/filesystem/__init__.py +gravitino/filesystem/gvfs.py +gravitino/filesystem/gvfs_config.py +gravitino/rest/__init__.py +gravitino/rest/rest_message.py +gravitino/rest/rest_utils.py +gravitino/utils/__init__.py +gravitino/utils/http_client.py \ No newline at end of file diff --git a/clients/client-python/apache_gravitino.egg-info/dependency_links.txt b/clients/client-python/apache_gravitino.egg-info/dependency_links.txt new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/clients/client-python/apache_gravitino.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/clients/client-python/apache_gravitino.egg-info/requires.txt b/clients/client-python/apache_gravitino.egg-info/requires.txt new file mode 100644 index 00000000000..ae766011a55 --- /dev/null +++ b/clients/client-python/apache_gravitino.egg-info/requires.txt @@ -0,0 +1,22 @@ +requests==2.32.2 +dataclasses-json==0.6.6 +readerwriterlock==1.0.9 +fsspec==2024.3.1 +pyarrow==15.0.2 +cachetools==5.3.3 + +[dev] +requests==2.32.2 +dataclasses-json==0.6.6 +pylint==3.2.2 +black==24.4.2 +twine==5.1.1 +coverage==7.5.1 +pandas==2.0.3 +pyarrow==15.0.2 +llama-index==0.10.40 +tenacity==8.3.0 +cachetools==5.3.3 +readerwriterlock==1.0.9 +docker==7.1.0 +pyjwt[crypto]==2.8.0 diff --git a/clients/client-python/apache_gravitino.egg-info/top_level.txt b/clients/client-python/apache_gravitino.egg-info/top_level.txt new file mode 100644 index 00000000000..1b987cff694 --- /dev/null +++ b/clients/client-python/apache_gravitino.egg-info/top_level.txt @@ -0,0 +1 @@ +gravitino diff --git a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRestKerberosHiveCatalogIT.java b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRestKerberosHiveCatalogIT.java index e647c59597b..be768189458 100644 --- 
a/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRestKerberosHiveCatalogIT.java +++ b/iceberg/iceberg-rest-server/src/test/java/org/apache/gravitino/iceberg/integration/test/IcebergRestKerberosHiveCatalogIT.java @@ -60,13 +60,12 @@ void initEnv() { tempDir = file.getAbsolutePath(); HiveContainer kerberosHiveContainer = containerSuite.getKerberosHiveContainer(); - kerberosHiveContainer - .getContainer() - .copyFileFromContainer("/etc/admin.keytab", tempDir + HIVE_METASTORE_CLIENT_KEYTAB); + kerberosHiveContainer.copyFileFromContainer( + "/etc/admin.keytab", tempDir + HIVE_METASTORE_CLIENT_KEYTAB); String tmpKrb5Path = tempDir + "/krb5.conf_tmp"; String krb5Path = tempDir + "/krb5.conf"; - kerberosHiveContainer.getContainer().copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); + kerberosHiveContainer.copyFileFromContainer("/etc/krb5.conf", tmpKrb5Path); // Modify the krb5.conf and change the kdc and admin_server to the container IP String ip = containerSuite.getKerberosHiveContainer().getContainerIpAddress(); diff --git a/integration-test-common/docker-script/docker-compose.yaml b/integration-test-common/docker-script/docker-compose.yaml index b9ba4199f5d..bceed687570 100644 --- a/integration-test-common/docker-script/docker-compose.yaml +++ b/integration-test-common/docker-script/docker-compose.yaml @@ -76,6 +76,99 @@ services: timeout: 60s retries: 5 + kafka: + image: apache/kafka:3.7.0 + container_name: trino-ci-kafka + networks: + - trino-net + hostname: gravitino-ci-kafka + environment: + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_NODE_ID: 1 + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@gravitino-ci-kafka:9093' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,CONTROLLER://:9093' + DEFAULT_BROKER_PORT: 9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://gravitino-ci-kafka:9092' + ports: + - "9092:9092" + healthcheck: + test: [ "CMD", "true" ] + interval: 5s + timeout: 60s + retries: 5 + + doris: + image: datastrato/gravitino-ci-doris:0.1.5 + container_name: trino-ci-doris + networks: + - trino-net + hostname: gravitino-ci-doris + ports: + - "9030:9030" + - "8030:8030" + healthcheck: + test: [ "CMD", "true" ] + interval: 5s + timeout: 60s + retries: 5 + + ranger: + image: datastrato/gravitino-ci-ranger:0.1.1 + container_name: trino-ci-ranger + networks: + - trino-net + hostname: gravitino-ci-ranger + environment: + RANGER_PASSWORD: 'rangerR0cks!' 
+ entrypoint: /bin/bash /tmp/ranger/init.sh + volumes: + - ./init/ranger:/tmp/ranger + ports: + - "6080:6080" + healthcheck: + test: [ "CMD-SHELL", "lsof -i :6080 | grep -q LISTEN || exit 1" ] + interval: 10s + timeout: 60s + retries: 10 + + hive_with_ranger: + image: datastrato/gravitino-ci-hive:0.1.13 + networks: + - trino-net + container_name: trino-ci-hive-with-ranger + hostname: gravitino-ci-hive-with-ranger + environment: + - HADOOP_USER_NAME=gravitino + - HIVE_RUNTIME_VERSION=hive3 + - RANGER_SERVER_URL=${GRAVITINO_RANGER_URL:-http://gravitino-ci-ranger:6080} + - RANGER_HIVE_REPOSITORY_NAME=${GRAVITINO_RANGER_HIVE_REPOSITORY_NAME:-hiveDev} + - RANGER_HDFS_REPOSITORY_NAME=${GRAVITINO_RANGER_HDFS_REPOSITORY_NAME:-hdfsDev} + entrypoint: /bin/bash /tmp/hive/init.sh + volumes: + - ./init/hive:/tmp/hive + healthcheck: + test: [ "CMD", "/tmp/check-status.sh" ] + interval: 10s + timeout: 60s + retries: 10 + depends_on: + ranger: + condition: service_healthy + + hive_with_kerberos: + image: datastrato/gravitino-ci-kerberos-hive:0.1.5 + networks: + - trino-net + container_name: trino-ci-hive-with-kerberos + hostname: gravitino-ci-kerberos-hive + healthcheck: + test: [ "CMD", "/tmp/check-status.sh" ] + interval: 10s + timeout: 60s + retries: 10 + trino: image: trinodb/trino:435 networks: diff --git a/integration-test-common/docker-script/init/hive/init.sh b/integration-test-common/docker-script/init/hive/init.sh index 35d9a8ccfca..1c002cf9e6a 100755 --- a/integration-test-common/docker-script/init/hive/init.sh +++ b/integration-test-common/docker-script/init/hive/init.sh @@ -18,5 +18,10 @@ IP=$(hostname -I | awk '{print $1}') sed -i "s|hdfs://__REPLACE__HOST_NAME:9000|hdfs://${IP}:9000|g" ${HIVE_TMP_CONF_DIR}/hive-site.xml +# replace "export HADOOP_HEAPSIZE=512" with "export HADOOP_HEAPSIZE=256" in hadoop-env.sh +sed -i "s|export HADOOP_HEAPSIZE=512|export HADOOP_HEAPSIZE=256|g" ${HADOOP_TMP_CONF_DIR}/hadoop-env.sh + +# add jvm options to ${ZK_HOME}/conf/java.env +echo "export JVMFLAGS=\"-Xmx256m\"" >> ${ZK_HOME}/conf/java.env /bin/bash /usr/local/sbin/start.sh diff --git a/integration-test-common/docker-script/launch.sh b/integration-test-common/docker-script/launch.sh index 8705d8c02a6..bac2836b379 100755 --- a/integration-test-common/docker-script/launch.sh +++ b/integration-test-common/docker-script/launch.sh @@ -23,6 +23,7 @@ cd "$(dirname "$0")" playground_dir="$(dirname "${BASH_SOURCE-$0}")" playground_dir="$(cd "${playground_dir}">/dev/null; pwd)" +services=$@ isExist=`which docker` if [ ! $isExist ] then @@ -32,12 +33,51 @@ fi cd ${playground_dir} +checkTrinoService() { + local service_name=$1 + local max_attempts=300 + local attempts=0 + + while true; do +# docker compose exec -T trino trino --execute "SELECT 1" >/dev/null 2>&1 && { +# break; +# } + +# num_container=$(docker ps --format '{{.Names}}' | grep trino-ci | wc -l) +# if [ "$num_container" -lt 4 ]; then +# echo "ERROR: Trino-ci containers start failed." +# exit 0 +# fi + + service_status=$(docker compose ps $service_name | grep "$service_name") + if echo "$service_status" | grep -q "healthy"; then + echo "$service_name service is healthy." + break; + else + echo "$service_name service is not healthy($attempts/$max_attempts)." + # print the service status and logs + docker compose ps + docker compose logs $service_name + fi + + if [ "$attempts" -ge "$max_attempts" ]; then + echo "ERROR: Trino service did not start within the $max_attempts time." 
+ exit 1 + fi + + ((attempts++)) + sleep 1 + done + + echo "Trino service start successfully" +} + # create log dir LOG_DIR=../build/trino-ci-container-log rm -fr $LOG_DIR mkdir -p $LOG_DIR -docker compose up -d +docker compose up -d $services LOG_PATH=$LOG_DIR/trino-ci-docker-compose.log @@ -45,28 +85,8 @@ echo "The docker compose log is: $LOG_PATH" nohup docker compose logs -f -t > $LOG_PATH & -max_attempts=300 -attempts=0 - -while true; do - docker compose exec -T trino trino --execute "SELECT 1" >/dev/null 2>&1 && { - break; - } - - num_container=$(docker ps --format '{{.Names}}' | grep trino-ci | wc -l) - if [ "$num_container" -lt 4 ]; then - echo "ERROR: Trino-ci containers start failed." - exit 0 - fi - - if [ "$attempts" -ge "$max_attempts" ]; then - echo "ERROR: Trino service did not start within the $max_attempts time." - exit 1 - fi - - ((attempts++)) - sleep 1 +for service in $services; do + checkTrinoService $service done - echo "All docker compose service is now available." diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/BaseContainer.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/BaseContainer.java index 192ba2c0cb5..bff2b01d367 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/BaseContainer.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/BaseContainer.java @@ -19,6 +19,7 @@ package org.apache.gravitino.integration.test.container; import static java.util.Objects.requireNonNull; +import static org.apache.gravitino.integration.test.util.AbstractIT.ACTIVE_CI; import static org.testcontainers.utility.MountableFile.forHostPath; import com.github.dockerjava.api.DockerClient; @@ -50,6 +51,9 @@ */ public abstract class BaseContainer implements AutoCloseable { public static final Logger LOG = LoggerFactory.getLogger(BaseContainer.class); + + protected static final boolean activeCI = "true".equals(System.getenv(ACTIVE_CI)); + // Host name of the container protected final String hostName; // Exposed ports of the container @@ -57,7 +61,10 @@ public abstract class BaseContainer implements AutoCloseable { // Files to mount in the container private final Map filesToMount; // environment variables of the container - private final Map envVars; + protected final Map envVars; + + protected final String image; + // Additional host and IP address mapping private final Map extraHosts; // Network of the container @@ -81,6 +88,7 @@ protected BaseContainer( .withSysctls( Collections.singletonMap( "net.ipv4.ip_local_port_range", "20000 40000"))); + this.image = image; this.ports = requireNonNull(ports, "ports is null"); this.hostName = requireNonNull(hostName, "hostName is null"); this.extraHosts = extraHosts; @@ -128,10 +136,6 @@ public Integer getMappedPort(int exposedPort) { return container.getMappedPort(exposedPort); } - public GenericContainer getContainer() { - return container; - } - // This method is used to get the IP address of the container. 
public String getContainerIpAddress() { DockerClient dockerClient = DockerClientFactory.instance().client(); diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/HiveContainer.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/HiveContainer.java index c08094e7bfb..fed665d410d 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/HiveContainer.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/HiveContainer.java @@ -19,21 +19,27 @@ package org.apache.gravitino.integration.test.container; import static java.lang.String.format; +import static org.apache.gravitino.integration.test.container.RangerContainer.DOCKER_ENV_RANGER_SERVER_URL; +import static org.apache.gravitino.integration.test.util.ProcessData.TypesOfData.STREAMS_MERGED; import static org.testcontainers.shaded.org.awaitility.Awaitility.await; import com.google.common.collect.ImmutableSet; import java.io.File; +import java.lang.reflect.Constructor; import java.net.InetSocketAddress; import java.net.Socket; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; +import org.apache.gravitino.integration.test.util.AbstractIT; +import org.apache.gravitino.integration.test.util.CommandExecutor; import org.rnorth.ducttape.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.containers.Container; import org.testcontainers.containers.Network; +import org.testcontainers.utility.MountableFile; public class HiveContainer extends BaseContainer { public static final Logger LOG = LoggerFactory.getLogger(HiveContainer.class); @@ -57,6 +63,8 @@ public class HiveContainer extends BaseContainer { private static final String HIVE_LOG_PATH = "/tmp/root/"; private static final String HDFS_LOG_PATH = "/usr/local/hadoop/logs/"; + private String containerName = "trino-ci-hive-with-kerberos"; + public static Builder builder() { return new Builder(); } @@ -78,14 +86,103 @@ public String getHostName() { @Override protected void setupContainer() { + super.setupContainer(); withLogConsumer(new PrintingContainerLog(format("%-14s| ", "HiveContainer-" + hostName))); } @Override public void start() { - super.start(); - Preconditions.check("Hive container startup failed!", checkContainerStatus(15)); + if (activeCI) { + if (envVars.containsKey(DOCKER_ENV_RANGER_SERVER_URL)) { + ContainerSuite.getTrinoITContainers().launch(envVars, AbstractIT.Service.HIVE_WITH_RANGER); + + } else if (image.equals(KERBEROS_IMAGE)) { + ContainerSuite.getTrinoITContainers().launch(AbstractIT.Service.HIVE_WITH_KERBEROS); + + } else { + ContainerSuite.getTrinoITContainers().launch(AbstractIT.Service.HIVE); + } + } else { + super.start(); + Preconditions.check("Hive container startup failed!", checkContainerStatus(15)); + } + } + + @Override + public String getContainerIpAddress() { + if (activeCI) { + if (envVars.containsKey(DOCKER_ENV_RANGER_SERVER_URL)) { + return ContainerSuite.getTrinoITContainers() + .getServiceIpAddress(AbstractIT.Service.HIVE_WITH_RANGER); + + } else if (image.equals(KERBEROS_IMAGE)) { + return ContainerSuite.getTrinoITContainers() + .getServiceIpAddress(AbstractIT.Service.HIVE_WITH_KERBEROS); + + } else { + return ContainerSuite.getTrinoITContainers().getServiceIpAddress(AbstractIT.Service.HIVE); + } + } + return super.getContainerIpAddress(); + } + + /** Copies a file which resides inside the 
container to user defined directory */ + public void copyFileFromContainer(String containerPath, String destinationPath) { + if (activeCI) { + Object output = + CommandExecutor.executeCommandLocalHost( + format("docker cp %s:%s %s", containerName, containerPath, destinationPath), + false, + STREAMS_MERGED); + LOG.info("Command output:\n{}", output); + } else { + container.copyFileFromContainer(containerPath, destinationPath); + } + } + + /** + * Copies a file or directory to the container. + * + * @param mountableFile file or directory which is copied into the container + * @param containerPath destination path inside the container + */ + public void copyFileToContainer(MountableFile mountableFile, String containerPath) { + if (activeCI) { + File file = new File(mountableFile.getResolvedPath()); + Object output = + CommandExecutor.executeCommandLocalHost( + format("docker cp %s %s:%s", file.getAbsolutePath(), containerName, containerPath), + false, + STREAMS_MERGED); + LOG.info("Command output:\n{}", output); + } else { + container.copyFileToContainer(mountableFile, containerPath); + } + } + + @Override + public Container.ExecResult executeInContainer(String... commandAndArgs) { + if (activeCI) { + Object output = + CommandExecutor.executeCommandLocalHost( + format("docker exec %s %s", containerName, String.join(" ", commandAndArgs)), + false, + STREAMS_MERGED); + LOG.info("Command output:\n{}", output); + // todo: overwrite this method + try { + Constructor constructor = + Container.ExecResult.class.getDeclaredConstructor( + int.class, String.class, String.class); + constructor.setAccessible(true); + return constructor.newInstance(0, "output", "error"); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + return super.executeInContainer(commandAndArgs); + } } @Override diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/KafkaContainer.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/KafkaContainer.java index 7f027a32224..990beccfad1 100644 --- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/KafkaContainer.java +++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/KafkaContainer.java @@ -30,7 +30,6 @@ import org.apache.gravitino.integration.test.util.ITUtils; import org.apache.logging.log4j.util.Strings; import org.testcontainers.containers.Container; -import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.Network; import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; import org.testcontainers.shaded.com.google.common.collect.ImmutableSet; @@ -77,9 +76,8 @@ private void copyKafkaLogs() { String kafkaLogJarPath = "/home/appuser/kafka-logs.tar"; - GenericContainer kafkaContainer = getContainer(); - kafkaContainer.execInContainer("tar", "cf", kafkaLogJarPath, KAFKA_LOGS_DIR); - kafkaContainer.copyFileFromContainer(kafkaLogJarPath, destPath + "/kafka-logs.tar"); + executeInContainer("tar", "cf", kafkaLogJarPath, KAFKA_LOGS_DIR); + container.copyFileFromContainer(kafkaLogJarPath, destPath + "/kafka-logs.tar"); } catch (Exception e) { LOG.error("Failed to package Kafka logs", e); } diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/RangerContainer.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/RangerContainer.java index 54b2afc0c77..006724ec0c5 100644 --- 
+++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/RangerContainer.java
@@ -25,6 +25,7 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import org.apache.gravitino.integration.test.util.AbstractIT;
 import org.apache.ranger.RangerClient;
 import org.apache.ranger.RangerServiceException;
 import org.rnorth.ducttape.Preconditions;
@@ -80,6 +81,12 @@ protected void setupContainer() {
 
   @Override
   public void start() {
+    if (activeCI) {
+      ContainerSuite.getTrinoITContainers().launch(AbstractIT.Service.RANGER);
+      rangerUrl = format("http://%s:%d", getContainerIpAddress(), RANGER_SERVER_PORT);
+      rangerClient = new RangerClient(rangerUrl, authType, rangerUserName, rangerPassword, null);
+      return;
+    }
     super.start();
 
     rangerUrl = String.format("http://localhost:%s", this.getMappedPort(RANGER_SERVER_PORT));
@@ -88,6 +95,14 @@ public void start() {
     Preconditions.check("Ranger container startup failed!", checkContainerStatus(10));
   }
 
+  @Override
+  public String getContainerIpAddress() {
+    if (activeCI) {
+      return ContainerSuite.getTrinoITContainers().getServiceIpAddress(AbstractIT.Service.RANGER);
+    }
+    return super.getContainerIpAddress();
+  }
+
   @Override
   protected boolean checkContainerStatus(int retryLimit) {
     int nRetry = 0;
diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/TrinoITContainers.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/TrinoITContainers.java
index 3dce2671cdc..ee37029c40a 100644
--- a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/TrinoITContainers.java
+++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/container/TrinoITContainers.java
@@ -18,8 +18,23 @@
  */
 package org.apache.gravitino.integration.test.container;
 
+import static org.apache.gravitino.integration.test.util.AbstractIT.ACTIVE_CI;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.DORIS;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.HIVE;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.HIVE_WITH_KERBEROS;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.HIVE_WITH_RANGER;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.KAFKA;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.MYSQL;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.POSTGRESQL;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.RANGER;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.TRINO;
+
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.gravitino.integration.test.util.AbstractIT;
 import org.apache.gravitino.integration.test.util.CommandExecutor;
 import org.apache.gravitino.integration.test.util.ITUtils;
 import org.apache.gravitino.integration.test.util.ProcessData;
@@ -27,17 +42,24 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.testcontainers.containers.ContainerLaunchException;
-import org.testcontainers.shaded.com.google.common.collect.ImmutableSet;
+import org.testcontainers.shaded.com.google.common.collect.Maps;
 
 public class TrinoITContainers implements AutoCloseable {
   private static final Logger LOG = LoggerFactory.getLogger(TrinoITContainers.class);
 
   public static String dockerComposeDir;
 
-  private static ImmutableSet<String> servicesName =
-      ImmutableSet.of("trino", "hive_metastore", "hdfs", "mysql", "postgresql");
+  // private static ImmutableSet<String> servicesName =
+  //     ImmutableSet.of("trino", "hive_metastore", "hdfs", "mysql", "postgresql");
+
+  // private static final String HIVE_SERVICE_NAME = "hive";
+  // private static final String HDFS_SERVICE_NAME = "hdfs";
+  // private static final String MYSQL_SERVICE_NAME = "mysql";
+  // private static final String POSTGRESQL_SERVICE_NAME = "postgresql";
 
-  Map<String, String> servicesUri = new HashMap<>();
+  private Map<String, String> servicesUri = new HashMap<>();
+
+  private Map<AbstractIT.Service, String> serviceIP = new HashMap<>();
 
   TrinoITContainers() {
     String dir = System.getenv("GRAVITINO_ROOT_DIR");
@@ -48,7 +70,11 @@ public class TrinoITContainers implements AutoCloseable {
     dockerComposeDir = ITUtils.joinPath(dir, "integration-test-common", "docker-script");
   }
 
-  public void launch(int gravitinoServerPort) throws Exception {
+  public void launch(AbstractIT.Service... services) {
+    launch(Maps.newHashMap(), services);
+  }
+
+  public void launchTrino(int gravitinoServerPort) {
     shutdown();
 
     Map<String, String> env = new HashMap<>();
@@ -57,7 +83,15 @@ public void launch(int gravitinoServerPort) throws Exception {
       env.put("GRAVITINO_LOG_PATH", System.getProperty("gravitino.log.path"));
     }
 
-    String command = ITUtils.joinPath(dockerComposeDir, "launch.sh");
+    launch(env, TRINO, HIVE, MYSQL, POSTGRESQL);
+  }
+
+  public void launch(Map<String, String> env, AbstractIT.Service... services) {
+    Set<String> serviceNames =
+        Arrays.stream(services).map(s -> s.toString().toLowerCase()).collect(Collectors.toSet());
+
+    String command =
+        ITUtils.joinPath(dockerComposeDir, "launch.sh") + " " + String.join(" ", serviceNames);
     Object output =
         CommandExecutor.executeCommandLocalHost(
             command, false, ProcessData.TypesOfData.STREAMS_MERGED, env);
@@ -69,10 +103,34 @@ public void launch(int gravitinoServerPort) throws Exception {
       throw new ContainerLaunchException("Failed to start containers:\n " + outputString);
     }
 
-    resolveServerAddress();
+    resolveServerAddress(serviceNames);
   }
 
-  private void resolveServerAddress() throws Exception {
+  // public void launch(int gravitinoServerPort) throws Exception {
+  //   shutdown();
+  //
+  //   Map<String, String> env = new HashMap<>();
+  //   env.put("GRAVITINO_SERVER_PORT", String.valueOf(gravitinoServerPort));
+  //   if (System.getProperty("gravitino.log.path") != null) {
+  //     env.put("GRAVITINO_LOG_PATH", System.getProperty("gravitino.log.path"));
+  //   }
+  //
+  //   String command = ITUtils.joinPath(dockerComposeDir, "launch.sh");
+  //   Object output =
+  //       CommandExecutor.executeCommandLocalHost(
+  //           command, false, ProcessData.TypesOfData.STREAMS_MERGED, env);
+  //   LOG.info("Command {} output:\n{}", command, output);
+  //
+  //   String outputString = output.toString();
+  //   if (Strings.isNotEmpty(outputString)
+  //       && !outputString.contains("All docker compose service is now available")) {
+  //     throw new ContainerLaunchException("Failed to start containers:\n " + outputString);
+  //   }
+  //
+  //   resolveServerAddress(servicesName);
+  // }
+
+  private void resolveServerAddress(Set<String> serviceNames) {
     String command = ITUtils.joinPath(dockerComposeDir, "inspect_ip.sh");
     Object output =
         CommandExecutor.executeCommandLocalHost(
            command, false, ProcessData.TypesOfData.STREAMS_MERGED);
@@ -100,22 +158,50 @@ private void resolveServerAddress() throws Exception {
         String address = info[1];
 
         if (containerName.equals("trino")) {
+          serviceIP.put(TRINO, address);
servicesUri.put("trino", String.format("http://%s:8080", address)); + } else if (containerName.equals("hive")) { - servicesUri.put("hive_metastore", String.format("thrift://%s:9083", address)); + serviceIP.put(HIVE, address); + servicesUri.put("hive", String.format("thrift://%s:9083", address)); servicesUri.put("hdfs", String.format("hdfs://%s:9000", address)); + + } else if (containerName.equals("hive-with-ranger")) { + serviceIP.put(HIVE_WITH_RANGER, address); + + } else if (containerName.equals("hive-with-kerberos")) { + serviceIP.put(HIVE_WITH_KERBEROS, address); + } else if (containerName.equals("mysql")) { + serviceIP.put(MYSQL, address); servicesUri.put("mysql", String.format("jdbc:mysql://%s:3306", address)); + } else if (containerName.equals("postgresql")) { + serviceIP.put(POSTGRESQL, address); servicesUri.put("postgresql", String.format("jdbc:postgresql://%s", address)); + + } else if (containerName.equals("kafka")) { + serviceIP.put(KAFKA, address); + servicesUri.put("kafka", String.format("%s:9092", address)); + } else if (containerName.equals("doris")) { + serviceIP.put(DORIS, address); + servicesUri.put("doris", String.format("jdbc:mysql://%s:9030", address)); + + } else if (containerName.equals("ranger")) { + serviceIP.put(RANGER, address); + servicesUri.put("ranger", String.format("http://%s:6080", "localhost")); + + } else { + throw new RuntimeException("Unexpected container name: " + containerName); } } } catch (Exception e) { throw new ContainerLaunchException("Unexpected container status :\n" + containerIpMapping, e); } - for (String serviceName : servicesName) { - if (!servicesUri.containsKey(serviceName)) { + for (String serviceName : serviceNames) { + if (!servicesUri.containsKey(serviceName) + && !serviceIP.containsKey(AbstractIT.Service.valueOf(serviceName.toUpperCase()))) { throw new ContainerLaunchException( String.format("The container for the %s service is not started: ", serviceName)); } @@ -123,6 +209,10 @@ private void resolveServerAddress() throws Exception { } public void shutdown() { + if ("true".equals(System.getenv(ACTIVE_CI))) { + return; + } + String command = ITUtils.joinPath(dockerComposeDir, "shutdown.sh"); Object output = CommandExecutor.executeCommandLocalHost( @@ -130,18 +220,26 @@ public void shutdown() { LOG.info("Command {} output:\n{}", command, output); } + public String getServiceIpAddress(AbstractIT.Service service) { + return serviceIP.get(service); + } + public String getTrinoUri() { return servicesUri.get("trino"); } public String getHiveMetastoreUri() { - return servicesUri.get("hive_metastore"); + return servicesUri.get("hive"); } public String getHdfsUri() { return servicesUri.get("hdfs"); } + public String getHdfsIpAddress() { + return serviceIP.get(HIVE); + } + public String getMysqlUri() { return servicesUri.get("mysql"); } @@ -150,6 +248,18 @@ public String getPostgresqlUri() { return servicesUri.get("postgresql"); } + public String getKafkaUri() { + return servicesUri.get("kafka"); + } + + public String getDorisUri() { + return servicesUri.get("doris"); + } + + public String getRangerUri() { + return servicesUri.get("ranger"); + } + @Override public void close() throws Exception { shutdown(); diff --git a/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java index 968eeeef884..5ee8be8902c 100644 --- 
+++ b/integration-test-common/src/test/java/org/apache/gravitino/integration/test/util/AbstractIT.java
@@ -55,16 +55,12 @@
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.extension.ExtendWith;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.CsvSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.testcontainers.shaded.org.awaitility.Awaitility;
 
 @ExtendWith({PrintFuncNameExtension.class, CloseContainerExtension.class})
 public class AbstractIT {
-  protected static final ContainerSuite containerSuite = ContainerSuite.getInstance();
-
   private static final Logger LOG = LoggerFactory.getLogger(AbstractIT.class);
 
   private static final Splitter COMMA = Splitter.on(",").omitEmptyStrings().trimResults();
@@ -90,6 +86,8 @@ public class AbstractIT {
   public static final String DOWNLOAD_POSTGRESQL_JDBC_DRIVER_URL =
       "https://jdbc.postgresql.org/download/postgresql-42.7.0.jar";
 
+  public static final String ACTIVE_CI = "ACTIVE_CI";
+
   private static TestDatabaseName META_DATA;
   private static MySQLContainer MYSQL_CONTAINER;
@@ -97,6 +95,23 @@ public class AbstractIT {
 
   protected static String originConfig;
 
+  public enum Service {
+    TRINO,
+    HIVE,
+    HIVE_WITH_RANGER,
+    HIVE_WITH_KERBEROS,
+    MYSQL,
+    POSTGRESQL,
+    KAFKA,
+    DORIS,
+    RANGER;
+
+    @Override
+    public String toString() {
+      return this.name().toLowerCase();
+    }
+  }
+
   public static int getGravitinoServerPort() {
     JettyServerConfig jettyServerConfig =
         JettyServerConfig.fromConfig(serverConfig, WEBSERVER_CONF_PREFIX);
@@ -198,13 +213,6 @@ private static void setMySQLBackend() {
     }
   }
 
-  @ParameterizedTest
-  @CsvSource({
-    "embedded, jdbcBackend",
-    "embedded, kvBackend",
-    "deploy, jdbcBackend",
-    "deploy, kvBackend"
-  })
   @BeforeAll
   public static void startIntegrationTest() throws Exception {
     testMode =
@@ -217,8 +225,8 @@ public static void startIntegrationTest() throws Exception {
     if ("MySQL".equalsIgnoreCase(System.getenv("jdbcBackend"))) {
       // Start MySQL docker instance.
       META_DATA = TestDatabaseName.MYSQL_JDBC_BACKEND;
-      containerSuite.startMySQLContainer(META_DATA);
-      MYSQL_CONTAINER = containerSuite.getMySQLContainer();
+      ContainerSuite.getInstance().startMySQLContainer(META_DATA);
+      MYSQL_CONTAINER = ContainerSuite.getInstance().getMySQLContainer();
 
       setMySQLBackend();
     }
diff --git a/integration-test/trino-it/init/ranger/init.sh b/integration-test/trino-it/init/ranger/init.sh
new file mode 100644
index 00000000000..0cf016936f9
--- /dev/null
+++ b/integration-test/trino-it/init/ranger/init.sh
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+sed -i -E 's/-Xmx[^ ]+/-Xmx256m/g; s/-Xms[^ ]+/-Xms256m/g' /opt/ranger-admin/ews/ranger-admin-services.sh
+
+/bin/bash /tmp/start-ranger-services.sh
diff --git a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java
index 1c8e7559fd4..dbf4bf63888 100644
--- a/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java
+++ b/trino-connector/integration-test/src/test/java/org/apache/gravitino/trino/connector/integration/test/TrinoQueryITBase.java
@@ -75,7 +75,7 @@ private static void setEnv() throws Exception {
     gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort());
 
     trinoITContainers = ContainerSuite.getTrinoITContainers();
-    trinoITContainers.launch(AbstractIT.getGravitinoServerPort());
+    trinoITContainers.launchTrino(AbstractIT.getGravitinoServerPort());
 
     trinoUri = trinoITContainers.getTrinoUri();
     hiveMetastoreUri = trinoITContainers.getHiveMetastoreUri();
diff --git a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java
index 75d385ece8e..8b37c0d9461 100644
--- a/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java
+++ b/web/integration-test/src/test/java/org/apache/gravitino/integration/test/web/ui/CatalogsPageTest.java
@@ -19,6 +19,9 @@
 package org.apache.gravitino.integration.test.web.ui;
 
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.HIVE;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.MYSQL;
+import static org.apache.gravitino.integration.test.util.AbstractIT.Service.POSTGRESQL;
 import static org.apache.gravitino.rel.expressions.transforms.Transforms.identity;
 
 import com.google.common.collect.Maps;
@@ -121,7 +124,7 @@ public static void before() throws Exception {
     gravitinoUri = String.format("http://127.0.0.1:%d", AbstractIT.getGravitinoServerPort());
 
     trinoITContainers = ContainerSuite.getTrinoITContainers();
-    trinoITContainers.launch(AbstractIT.getGravitinoServerPort());
+    trinoITContainers.launch(HIVE, MYSQL, POSTGRESQL);
 
     hiveMetastoreUri = trinoITContainers.getHiveMetastoreUri();
     hdfsUri = trinoITContainers.getHdfsUri();
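
Reviewer note: the sketch below is not part of the patch; it only illustrates how a test is expected to consume the new service-based launch API (the class and test names are hypothetical, JUnit 5 is assumed). launch() always goes through docker-script/launch.sh with the requested service names and then resolves container IPs via inspect_ip.sh; the ACTIVE_CI flag only decides whether shutdown() actually tears the services down, which is what lets a single CI job reuse the same Hive/HDFS containers across test modules.

import static org.apache.gravitino.integration.test.util.AbstractIT.Service.HIVE;

import org.apache.gravitino.integration.test.container.ContainerSuite;
import org.apache.gravitino.integration.test.container.TrinoITContainers;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

// Hypothetical example class; not included in this change.
public class SharedHiveServiceExampleIT {

  private static TrinoITContainers containers;

  @BeforeAll
  static void startSharedServices() {
    // Brings up the compose-managed "hive" service via docker-script/launch.sh and resolves
    // its container IP; with ACTIVE_CI=true the later shutdown() is a no-op, so the service
    // started once by the workflow is reused.
    containers = ContainerSuite.getTrinoITContainers();
    containers.launch(HIVE);
  }

  @Test
  void sharedMetastoreIsResolvable() {
    // Both URIs are derived from the hive container's address in resolveServerAddress().
    Assertions.assertNotNull(containers.getHiveMetastoreUri()); // thrift://<hive-ip>:9083
    Assertions.assertNotNull(containers.getHdfsUri()); // hdfs://<hive-ip>:9000
    Assertions.assertEquals(containers.getHdfsIpAddress(), containers.getServiceIpAddress(HIVE));
  }
}

Tests that need the Kerberos-enabled Hive would instead keep using HiveContainer, whose start() maps the KERBEROS_IMAGE to Service.HIVE_WITH_KERBEROS as shown above.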