diff --git a/.asf.yaml b/.asf.yaml index b2593788948..ec00d6db07a 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -39,7 +39,7 @@ github: issues: true discussions: true wiki: false - projects: false + projects: true notifications: commits: commits@kyuubi.apache.org issues: notifications@kyuubi.apache.org diff --git a/.github/ISSUE_TEMPLATE/dependency.yml b/.github/ISSUE_TEMPLATE/dependency.yml new file mode 100644 index 00000000000..e71c7d1c64a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/dependency.yml @@ -0,0 +1,109 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# See https://gh-community.github.io/issue-template-feedback/structured/ + +name: Dependency +title: ":arrow_up: Upgrade from to " +description: Keep upstream dependencies fresh and stable +labels: [ "kind:build, priority:major, good first issue, help wanted" ] +body: + - type: markdown + attributes: + value: | + Thank you for finding the time to report the issue! We really appreciate the community's efforts to improve Kyuubi. + + It doesn't really matter whether what you are reporting is a bug or not, just feel free to share the problem you have + encountered with the community. For best practices, if it is indeed a bug, please try your best to provide the reproducible + steps. If you want to ask questions or share ideas, please [subscribe to our mailing list](mailto:dev-subscribe@kyuubi.apache.org) + and send emails to [our mailing list](mailto:dev@kyuubi.apache.org), you can also head to our + [Discussions](https://github.com/apache/kyuubi/discussions) tab. + + - type: checkboxes + attributes: + label: Code of Conduct + description: The Code of Conduct helps create a safe space for everyone. We require that everyone agrees to it. + options: + - label: > + I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct) + required: true + + - type: checkboxes + attributes: + label: Search before asking + options: + - label: > + I have searched in the [issues](https://github.com/apache/kyuubi/issues?q=is%3Aissue) and found no similar + issues. + required: true + + - type: dropdown + id: priority + attributes: + label: Why do we need to upgrade this artifact? + options: + - Common Vulnerabilities and Exposures (CVE) + - Bugfixes + - Usage of New Features + - Performance Improvements + - Regular Updates + validations: + required: true + + - type: input + id: artifact + attributes: + label: Artifact Name + description: Which artifact shall be upgraded? + placeholder: e.g. spark-sql + value: https://mvnrepository.com/search?q= + validations: + required: true + + - type: input + id: versions + attributes: + label: Target Version + description: Which version shall be upgraded? + placeholder: e.g. 
1.2.1 + validations: + required: true + + - type: textarea + id: changes + attributes: + label: Notable Changes + description: Please provide notable changes, or release notes if any + validations: + required: false + + - type: checkboxes + attributes: + label: Are you willing to submit PR? + description: > + A pull request is optional, but we are glad to help you in the contribution process + especially if you already know a good understanding of how to implement the fix. + Kyuubi is a community-driven project and we love to bring new contributors in. + options: + - label: Yes. I would be willing to submit a PR with guidance from the Kyuubi community to fix. + - label: No. I cannot submit a PR at this time. + + - type: markdown + attributes: + value: > + After changing the corresponding dependency version and before submitting your pull request, + it is necessary to execute `build/dependency.sh --replace` locally to update `dev/dependencyList`. diff --git a/.github/ISSUE_TEMPLATE/doc-improvement-report.yml b/.github/ISSUE_TEMPLATE/documentation.yml similarity index 67% rename from .github/ISSUE_TEMPLATE/doc-improvement-report.yml rename to .github/ISSUE_TEMPLATE/documentation.yml index 668ddb25695..87b87a6cd9e 100644 --- a/.github/ISSUE_TEMPLATE/doc-improvement-report.yml +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -15,16 +15,11 @@ # limitations under the License. # -name: Doc Improvement Report -title: "[DOCS] " +name: Documentation fixes or improvement +title: ":memo: Fix/Add for page" description: Fix errors, or improve the content or refactor architecture of online documentation -labels: ["kind:documentation"] +labels: ["kind:documentation,kind:minor,help wanted,good first issue"] body: - - type: markdown - attributes: - value: | - Thank you for finding the time to report the problem! We really appreciate the community efforts to improve Kyuubi. - - type: checkboxes attributes: label: Code of Conduct @@ -43,22 +38,25 @@ body: issues. required: true - - type: textarea + - type: dropdown + id: priority attributes: - label: Which parts of the documentation do you think need improvement? - description: Please describe the details with documentation you have. - placeholder: > - Please include links to the documentation that you want to improve and possibly screenshots showing - the details. Explain why do you think it needs to improve. Make sure you include view of the target - audience of the documentation. Please explain why you think the docs are wrong. + label: What type of changes will we make to the documentation? + options: + - Bugfixes + - Usage of New Feature + - Showcase + - Refactoring + - Typo, layout, grammar, spelling, punctuation errors, etc. + validations: + required: true - type: input id: versions attributes: label: Affects Version(s) description: Which versions of Kyuubi Documentation are affected by this issue? - placeholder: > - e.g. master/1.5.0/1.4.1/... + placeholder: e.g. master/1.5.0/1.4.1/... validations: required: true @@ -67,20 +65,9 @@ body: label: Improving the documentation description: How do you think the documentation can be improved? placeholder: > - Please explain how you think the documentation could be improved. Ideally specify where a new or missing - documentation should be added and what kind of information should be included. Sometimes people - writing the documentation do not realise that some assumptions they have might not be in the heads - of the reader, so try to explain exactly what you would like to see in the docs and why. 
-
-  - type: textarea
-    attributes:
-      label: Anything else
-      description: Anything else we need to know?
-      placeholder: >
-        How often does this problem occur? (Once? Every time? Only when certain conditions are met?)
-        Any relevant logs to include? Put them here inside fenced
-        ``` ``` blocks or inside a foldable details tag if it's long:
-        <details><summary>x.log</summary> lots of stuff </details>
+ Please include links to the documentation that you want to improve and possibly screenshots showing + the details. Explain why do you think it needs to improve. Make sure you include view of the target + audience of the documentation. Please explain why you think the docs are wrong. - type: checkboxes attributes: diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE index bdb71f30fb1..3cab99d1fe8 100644 --- a/.github/PULL_REQUEST_TEMPLATE +++ b/.github/PULL_REQUEST_TEMPLATE @@ -20,4 +20,13 @@ Please clarify why the changes are needed. For instance, - [ ] Add screenshots for manual tests if appropriate -- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request +- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/contributing/code/testing.html#running-tests) locally before make a pull request + + +### _Was this patch authored or co-authored using generative AI tooling?_ + diff --git a/.github/actions/setup-mvnd/action.yaml b/.github/actions/setup-maven/action.yaml similarity index 74% rename from .github/actions/setup-mvnd/action.yaml rename to .github/actions/setup-maven/action.yaml index 55c8139ff8b..0cb4b54c289 100644 --- a/.github/actions/setup-mvnd/action.yaml +++ b/.github/actions/setup-maven/action.yaml @@ -15,22 +15,17 @@ # limitations under the License. # -name: 'setup-mvnd' -description: 'Setup the maven daemon' -continue-on-error: true +name: setup-maven +description: 'Install and cache maven' runs: using: composite steps: - - name: Cache Mvnd + - name: Restore cached Maven uses: actions/cache@v3 with: - path: | - build/maven-mvnd-* - build/apache-maven-* - key: setup-mvnd-${{ runner.os }} - - name: Check Maven - run: build/mvn -v - shell: bash - - name: Check Mvnd - run: build/mvnd -v + path: build/apache-maven-* + key: setup-maven-${{ hashFiles('pom.xml') }} + restore-keys: setup-maven- + - name: Install Maven shell: bash + run: build/mvn -v diff --git a/.github/labeler.yml b/.github/labeler.yml index bbc64ed66fc..ecec1253274 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -102,7 +102,8 @@ "module:server": - "bin/kyuubi" - - "kyuubi-server/**/*" + - "kyuubi-server/src/**/*" + - "kyuubi-server/pom.xml" - "extension/server/kyuubi-server-plugin/**/*" "module:spark": @@ -121,3 +122,6 @@ "module:authz": - "extensions/spark/kyuubi-spark-authz/**/*" + +"module:ui": + - "kyuubi-server/web-ui/**/*" diff --git a/.github/workflows/dep.yml b/.github/workflows/dep.yml index 72f5c915da7..f39e5e6a212 100644 --- a/.github/workflows/dep.yml +++ b/.github/workflows/dep.yml @@ -26,7 +26,6 @@ on: # when pom or dependency workflow changes - '**/pom.xml' - '.github/workflows/dep.yml' - - .github/actions/setup-mvnd/*.yaml concurrency: group: dep-${{ github.head_ref || github.run_id }} @@ -45,12 +44,12 @@ jobs: java-version: 8 cache: 'maven' check-latest: false - - name: Setup Mvnd - uses: ./.github/actions/setup-mvnd + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Check kyuubi modules available id: modules-check run: >- - build/mvnd dependency:resolve validate -q + build/mvn dependency:resolve validate -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -Pfast -Denforcer.skip=false -pl kyuubi-ctl,kyuubi-server,kyuubi-assembly -am @@ -60,7 +59,7 @@ jobs: MAVEN_OPTS: -Dorg.slf4j.simpleLogger.defaultLogLevel=error if: steps.modules-check.conclusion == 'success' && steps.modules-check.outcome == 'failure' run: >- - build/mvnd clean install + build/mvn clean 
install -Pflink-provided,spark-provided,hive-provided -Dmaven.javadoc.skip=true -Drat.skip=true diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml index a490def9161..91c53a7a173 100644 --- a/.github/workflows/license.yml +++ b/.github/workflows/license.yml @@ -42,12 +42,10 @@ jobs: java-version: 8 cache: 'maven' check-latest: false - - name: Setup Mvnd - uses: ./.github/actions/setup-mvnd - run: >- - build/mvnd org.apache.rat:apache-rat-plugin:check + build/mvn org.apache.rat:apache-rat-plugin:check -Ptpcds -Pspark-block-cleaner -Pkubernetes-it - -Pspark-3.1 -Pspark-3.2 -Pspark-3.3 + -Pspark-3.1 -Pspark-3.2 -Pspark-3.3 -Pspark-3.4 - name: Upload rat report if: failure() uses: actions/upload-artifact@v3 diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index a70117826cc..3b85530d44a 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - MVN_OPT: -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Pjdbc-shaded -Dmaven.plugin.download.cache.path=/tmp/engine-archives + MVN_OPT: -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Pjdbc-shaded,gen-policy -Dmaven.plugin.download.cache.path=/tmp/engine-archives KUBERNETES_VERSION: v1.26.1 MINIKUBE_VERSION: v1.29.0 @@ -46,24 +46,42 @@ jobs: java: - 8 - 11 + - 17 spark: - '3.1' - '3.2' - '3.3' + - '3.4' spark-archive: [""] exclude-tags: [""] comment: ["normal"] include: - java: 8 - spark: '3.3' - spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.1.3 -Dspark.archive.name=spark-3.1.3-bin-hadoop3.2.tgz' - exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.HudiTest,org.apache.kyuubi.tags.IcebergTest' + spark: '3.4' + spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.1.3 -Dspark.archive.name=spark-3.1.3-bin-hadoop3.2.tgz -Pzookeeper-3.6' + exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.IcebergTest,org.apache.kyuubi.tags.SparkLocalClusterTest' comment: 'verify-on-spark-3.1-binary' - java: 8 - spark: '3.3' - spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.3 -Dspark.archive.name=spark-3.2.3-bin-hadoop3.2.tgz' - exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.HudiTest,org.apache.kyuubi.tags.IcebergTest' + spark: '3.4' + spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.4 -Dspark.archive.name=spark-3.2.4-bin-hadoop3.2.tgz -Pzookeeper-3.6' + exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.IcebergTest,org.apache.kyuubi.tags.SparkLocalClusterTest' comment: 'verify-on-spark-3.2-binary' + - java: 8 + spark: '3.4' + spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.3.3 -Dspark.archive.name=spark-3.3.3-bin-hadoop3.tgz -Pzookeeper-3.6' + exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.IcebergTest,org.apache.kyuubi.tags.SparkLocalClusterTest' + comment: 'verify-on-spark-3.3-binary' + - 
java: 8 + spark: '3.4' + spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.5.0 -Dspark.archive.name=spark-3.5.0-bin-hadoop3.tgz -Pzookeeper-3.6' + exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.IcebergTest,org.apache.kyuubi.tags.SparkLocalClusterTest' + comment: 'verify-on-spark-3.5-binary' + exclude: + # SPARK-33772: Spark supports JDK 17 since 3.3.0 + - java: 17 + spark: '3.1' + - java: 17 + spark: '3.2' env: SPARK_LOCAL_IP: localhost steps: @@ -79,6 +97,8 @@ jobs: check-latest: false - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Setup Python uses: actions/setup-python@v4 with: @@ -91,7 +111,7 @@ jobs: - name: Code coverage if: | matrix.java == 8 && - matrix.spark == '3.2' && + matrix.spark == '3.4' && matrix.spark-archive == '' uses: codecov/codecov-action@v3 with: @@ -131,6 +151,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Build and test Kyuubi AuthZ with supported Spark versions @@ -147,6 +169,38 @@ jobs: **/target/unit-tests.log **/kyuubi-spark-sql-engine.log* + scala213: + name: Scala Compilation Test + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + java: + - '8' + scala: + - '2.13' + steps: + - uses: actions/checkout@v3 + - name: Tune Runner VM + uses: ./.github/actions/tune-runner-vm + - name: Setup JDK ${{ matrix.java }} + uses: actions/setup-java@v3 + with: + distribution: temurin + java-version: ${{ matrix.java }} + cache: 'maven' + check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven + - name: Build on Scala ${{ matrix.scala }} + run: | + MODULES='!externals/kyuubi-flink-sql-engine' + ./build/mvn clean install -pl ${MODULES} -am \ + -DskipTests -Pflink-provided,hive-provided,spark-provided \ + -Pjava-${{ matrix.java }} \ + -Pscala-${{ matrix.scala }} \ + -Pspark-3.3 + flink-it: name: Flink Test runs-on: ubuntu-22.04 @@ -157,20 +211,15 @@ jobs: - 8 - 11 flink: - - '1.14' - - '1.15' - '1.16' + - '1.17' flink-archive: [ "" ] comment: [ "normal" ] include: - java: 8 - flink: '1.16' - flink-archive: '-Dflink.archive.mirror=https://archive.apache.org/dist/flink/flink-1.14.6 -Dflink.archive.name=flink-1.14.6-bin-scala_2.12.tgz' - comment: 'verify-on-flink-1.14-binary' - - java: 8 - flink: '1.16' - flink-archive: '-Dflink.archive.mirror=https://archive.apache.org/dist/flink/flink-1.15.3 -Dflink.archive.name=flink-1.15.3-bin-scala_2.12.tgz' - comment: 'verify-on-flink-1.15-binary' + flink: '1.17' + flink-archive: '-Dflink.archive.mirror=https://archive.apache.org/dist/flink/flink-1.16.1 -Dflink.archive.name=flink-1.16.1-bin-scala_2.12.tgz' + comment: 'verify-on-flink-1.16-binary' steps: - uses: actions/checkout@v3 - name: Tune Runner VM @@ -182,6 +231,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Build Flink with maven w/o linters @@ -228,6 +279,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Build and test 
Hive with maven w/o linters @@ -265,6 +318,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Build and test JDBC with maven w/o linters @@ -302,6 +357,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Build and test Trino with maven w/o linters @@ -334,6 +391,8 @@ jobs: java-version: 8 cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: Run TPC-DS Tests @@ -471,6 +530,8 @@ jobs: java-version: ${{ matrix.java }} cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Cache Engine Archives uses: ./.github/actions/cache-engine-archives - name: zookeeper integration tests diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 149da6d82b3..5ff634da6d8 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -43,6 +43,8 @@ jobs: java-version: 8 cache: 'maven' check-latest: false + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Build with Maven run: ./build/mvn clean install ${{ matrix.profiles }} -Dmaven.javadoc.skip=true -V - name: Upload test logs diff --git a/.github/workflows/publish-snapshot-docker.yml b/.github/workflows/publish-snapshot-docker.yml index 5c9c04d276d..3afccee7aa8 100644 --- a/.github/workflows/publish-snapshot-docker.yml +++ b/.github/workflows/publish-snapshot-docker.yml @@ -29,14 +29,23 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 - name: Login to Docker Hub uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build Kyuubi Docker Image - run: docker build --tag apache/kyuubi:master-snapshot --file build/Dockerfile . - - name: Docker image - run: docker images - - name: Push Docker image - run: docker push apache/kyuubi:master-snapshot + - name: Build and Push Kyuubi Docker Image + uses: docker/build-push-action@v4 + with: + # build cache on Github Actions, See: https://docs.docker.com/build/cache/backends/gha/#using-dockerbuild-push-action + cache-from: type=gha + cache-to: type=gha,mode=max + context: . 
+ file: build/Dockerfile + platforms: linux/amd64,linux/arm64 + push: true + tags: apache/kyuubi:master-snapshot diff --git a/.github/workflows/publish-snapshot-nexus.yml b/.github/workflows/publish-snapshot-nexus.yml index 0d4222b044a..b4191396b1f 100644 --- a/.github/workflows/publish-snapshot-nexus.yml +++ b/.github/workflows/publish-snapshot-nexus.yml @@ -30,16 +30,17 @@ jobs: matrix: branch: - master - - branch-1.6 - - branch-1.5 + - branch-1.7 + - branch-1.8 profiles: - -Pflink-provided,spark-provided,hive-provided,spark-3.1 - - -Pflink-provided,spark-provided,hive-provided,spark-3.2,tpcds + - -Pflink-provided,spark-provided,hive-provided,spark-3.2 + - -Pflink-provided,spark-provided,hive-provided,spark-3.3,tpcds include: - branch: master - profiles: -Pflink-provided,spark-provided,hive-provided,spark-3.3 - - branch: branch-1.6 - profiles: -Pflink-provided,spark-provided,hive-provided,spark-3.3 + profiles: -Pflink-provided,spark-provided,hive-provided,spark-3.4 + - branch: branch-1.8 + profiles: -Pflink-provided,spark-provided,hive-provided,spark-3.4 steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 93fce5c4ea4..6f575302ea1 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -34,10 +34,12 @@ jobs: strategy: matrix: profiles: - - '-Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.3,spark-3.2,spark-3.1,tpcds' + - '-Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.3,spark-3.2,spark-3.1,tpcds,kubernetes-it' steps: - uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Setup JDK 8 uses: actions/setup-java@v3 with: @@ -45,16 +47,16 @@ jobs: java-version: 8 cache: 'maven' check-latest: false - - name: Setup Mvnd - uses: ./.github/actions/setup-mvnd + - name: Setup Maven + uses: ./.github/actions/setup-maven - name: Setup Python 3 uses: actions/setup-python@v4 with: python-version: '3.9' cache: 'pip' - - name: Check kyuubi modules available + - name: Check kyuubi modules avaliable id: modules-check - run: build/mvnd dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true -q ${{ matrix.profiles }} + run: build/mvn dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true ${{ matrix.profiles }} continue-on-error: true - name: Install @@ -63,13 +65,14 @@ jobs: if: steps.modules-check.conclusion == 'success' && steps.modules-check.outcome == 'failure' run: | MVN_OPT="-DskipTests -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip" - build/mvnd clean install ${MVN_OPT} -Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.2,tpcds - build/mvnd clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-1 -Pspark-3.1 - build/mvnd clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-3,extensions/spark/kyuubi-spark-connector-kudu,extensions/spark/kyuubi-spark-connector-hive -Pspark-3.3 + build/mvn clean install ${MVN_OPT} -Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.2,tpcds + build/mvn clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-1 -Pspark-3.1 + build/mvn clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-3,extensions/spark/kyuubi-spark-connector-hive -Pspark-3.3 + build/mvn clean install ${MVN_OPT} -pl 
extensions/spark/kyuubi-extension-spark-3-4 -Pspark-3.4 - name: Scalastyle with maven id: scalastyle-check - run: build/mvnd scalastyle:check -q ${{ matrix.profiles }} + run: build/mvn scalastyle:check ${{ matrix.profiles }} - name: Print scalastyle error report if: failure() && steps.scalastyle-check.outcome != 'success' run: >- @@ -83,15 +86,15 @@ jobs: run: | SPOTLESS_BLACK_VERSION=$(build/mvn help:evaluate -Dexpression=spotless.python.black.version -q -DforceStdout) pip install black==$SPOTLESS_BLACK_VERSION - build/mvnd spotless:check -q ${{ matrix.profiles }} -Pspotless-python + build/mvn spotless:check ${{ matrix.profiles }} -Pspotless-python - name: setup npm uses: actions/setup-node@v3 with: - node-version: 16 + node-version: 18 - name: Web UI Style with node run: | cd ./kyuubi-server/web-ui - npm install pnpm -g + npm install pnpm@8 -g pnpm install pnpm run lint echo "---------------------------------------Notice------------------------------------" @@ -104,10 +107,32 @@ jobs: echo "---------------------------------------------------------------------------------" shellcheck: - name: Shellcheck + name: Super Linter and Shellcheck runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 + - name: Super Linter Checks + uses: github/super-linter/slim@v5 + env: + CREATE_LOG_FILE: true + ERROR_ON_MISSING_EXEC_BIT: true + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IGNORE_GENERATED_FILES: true + IGNORE_GITIGNORED_FILES: true + LINTER_RULES_PATH: / + LOG_LEVEL: NOTICE + SUPPRESS_POSSUM: true + VALIDATE_BASH_EXEC: true + VALIDATE_ENV: true + VALIDATE_JSONC: true + VALIDATE_POWERSHELL: true + VALIDATE_XML: true + - name: Upload Super Linter logs + if: failure() + uses: actions/upload-artifact@v3 + with: + name: super-linter-log + path: super-linter.log - name: check bin directory uses: ludeeus/action-shellcheck@1.1.0 with: diff --git a/.github/workflows/web-ui.yml b/.github/workflows/web-ui.yml index d9907899095..9de7a599d45 100644 --- a/.github/workflows/web-ui.yml +++ b/.github/workflows/web-ui.yml @@ -21,14 +21,35 @@ jobs: steps: - name: checkout uses: actions/checkout@v3 - - name: setup npm + - name: Setup JDK 8 + uses: actions/setup-java@v3 + with: + distribution: temurin + java-version: 8 + - name: Setup Maven + uses: ./.github/actions/setup-maven + - name: Get NodeJS and PNPM version + run: | + NODEJS_VERSION=$(build/mvn help:evaluate -Dexpression=node.version -q -DforceStdout) + PNPM_VERSION=$(build/mvn help:evaluate -Dexpression=pnpm.version -q -DforceStdout) + echo "NODEJS_VERSION=${NODEJS_VERSION}" >> "$GITHUB_ENV" + echo "PNPM_VERSION=${PNPM_VERSION}" >> "$GITHUB_ENV" + - name: Setup Nodejs and NPM uses: actions/setup-node@v3 with: - node-version: 16 + node-version: ${{env.NODEJS_VERSION}} + cache: npm + cache-dependency-path: ./kyuubi-server/web-ui/package.json + - name: Cache NPM dependencies + uses: actions/cache@v3 + with: + path: ./kyuubi-server/web-ui/node_modules + key: webui-dependencies-${{ hashFiles('kyuubi-server/web-ui/pnpm-lock.yaml') }} + restore-keys: webui-dependencies- - name: npm run coverage & build run: | cd ./kyuubi-server/web-ui - npm install pnpm -g + npm install pnpm@${PNPM_VERSION} -g pnpm install pnpm run coverage pnpm run build diff --git a/.gitignore b/.gitignore index 9a115ab0a9d..a2f6fb1efe4 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ *#*# *.#* +*.db *.iml *.ipr *.iws @@ -40,7 +41,6 @@ .scala_dependencies .settings build/apache-maven* -build/maven-mvnd* build/release/tmp build/scala* build/test @@ -58,8 +58,7 @@ metastore_db 
derby.log rest-audit.log **/dependency-reduced-pom.xml -metrics/report.json -metrics/.report.json.crc +metrics/ /kyuubi-ha/embedded_zookeeper/ embedded_zookeeper/ /externals/kyuubi-spark-sql-engine/operation_logs/ diff --git a/.idea/vcs.xml b/.idea/vcs.xml index 7405930199c..9c45aa8a469 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -25,7 +25,7 @@ GitHub share the sequence number of issues and pull requests, and it will redirect to the right place when the the sequence number not match kind. --> - diff --git a/.rat-excludes b/.rat-excludes index 7a841cf9c6c..5735ba69b9c 100644 --- a/.rat-excludes +++ b/.rat-excludes @@ -32,7 +32,6 @@ NOTICE* docs/** build/apache-maven-*/** -build/maven-mvnd-*/** build/scala-*/** **/**/operation_logs/**/** **/**/server_operation_logs/**/** @@ -51,8 +50,10 @@ build/scala-*/** **/metadata-store-schema*.sql **/*.derby.sql **/*.mysql.sql +**/*.sqlite.sql **/node/** **/web-ui/dist/** +**/web-ui/coverage/** **/pnpm-lock.yaml **/node_modules/** **/gen/* diff --git a/.readthedocs.yml b/.readthedocs.yaml similarity index 76% rename from .readthedocs.yml rename to .readthedocs.yaml index 671f2926628..115d9c33885 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yaml @@ -16,23 +16,19 @@ # version: 2 +build: + os: ubuntu-22.04 + tools: + python: "3.11" -# Build documentation in the docs/ directory with Sphinx sphinx: builder: html configuration: docs/conf.py -# Build documentation with MkDocs -#mkdocs: -# configuration: mkdocs.yml - -# Optionally build your docs in additional formats such as PDF formats: - pdf - epub -# Optionally set the version of Python and requirements required to build your docs python: - version: 3.7 install: - requirements: docs/requirements.txt diff --git a/.scalafmt.conf b/.scalafmt.conf index e682a17f71f..b0e130715de 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,4 +1,4 @@ -version = 3.7.1 +version = 3.7.5 runner.dialect=scala212 project.git=true diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef28d560e36..dc9094b8bb7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,4 +60,4 @@ TBD, please be patient for the surprise. 
## IDE Setup Guide -[IntelliJ IDEA Setup Guide](https://kyuubi.readthedocs.io/en/master/develop_tools/idea_setup.html) +[IntelliJ IDEA Setup Guide](https://kyuubi.readthedocs.io/en/master/contributing/code/idea_setup.html) diff --git a/LICENSE-binary b/LICENSE-binary index 92daf62ab7a..4cad931d70d 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -212,9 +212,6 @@ com.google.android:annotations commons-lang:commons-lang commons-logging:commons-logging org.apache.commons:commons-lang3 -org.apache.curator:curator-client -org.apache.curator:curator-framework -org.apache.curator:curator-recipes org.apache.derby:derby com.google.errorprone:error_prone_annotations net.jodah:failsafe @@ -264,6 +261,7 @@ io.etcd:jetcd-api io.etcd:jetcd-common io.etcd:jetcd-core io.etcd:jetcd-grpc +org.eclipse.jetty:jetty-client org.eclipse.jetty:jetty-http org.eclipse.jetty:jetty-io org.eclipse.jetty:jetty-security @@ -271,6 +269,7 @@ org.eclipse.jetty:jetty-server org.eclipse.jetty:jetty-servlet org.eclipse.jetty:jetty-util-ajax org.eclipse.jetty:jetty-util +org.eclipse.jetty:jetty-proxy org.apache.thrift:libfb303 org.apache.thrift:libthrift org.apache.logging.log4j:log4j-1.2-api @@ -318,7 +317,12 @@ io.swagger.core.v3:swagger-jaxrs2 io.swagger.core.v3:swagger-models io.vertx:vertx-core io.vertx:vertx-grpc -org.apache.zookeeper:zookeeper +com.squareup.retrofit2:retrofit +com.squareup.okhttp3:okhttp +org.apache.kafka:kafka-clients +org.lz4:lz4-java +org.xerial.snappy:snappy-java +org.xerial:sqlite-jdbc BSD ------------ @@ -330,6 +334,7 @@ com.thoughtworks.paranamer:paranamer dk.brics.automaton:automaton com.google.protobuf:protobuf-java-util com.google.protobuf:protobuf-java +com.github.luben:zstd-jni Eclipse Distribution License - v 1.0 ------------------------------------ @@ -356,6 +361,9 @@ org.codehaus.mojo:animal-sniffer-annotations org.slf4j:slf4j-api org.slf4j:jcl-over-slf4j org.slf4j:jul-over-slf4j +com.theokanning.openai-gpt3-java:api +com.theokanning.openai-gpt3-java:client +com.theokanning.openai-gpt3-java:service kyuubi-server/src/main/resources/org/apache/kyuubi/ui/static/assets/fonts/* kyuubi-server/src/main/resources/org/apache/kyuubi/ui/static/icon.min.css @@ -369,6 +377,8 @@ is auto-generated by `pnpm licenses list --prod`. ┌────────────────────────────────────┬──────────────┐ │ Package │ License │ ├────────────────────────────────────┼──────────────┤ +│ swagger-ui-dist │ Apache-2.0 │ +├────────────────────────────────────┼──────────────┤ │ typescript │ Apache-2.0 │ ├────────────────────────────────────┼──────────────┤ │ normalize-wheel-es │ BSD-3-Clause │ @@ -451,6 +461,8 @@ is auto-generated by `pnpm licenses list --prod`. 
├────────────────────────────────────┼──────────────┤ │ csstype │ MIT │ ├────────────────────────────────────┼──────────────┤ +│ date-fns │ MIT │ +├────────────────────────────────────┼──────────────┤ │ dayjs │ MIT │ ├────────────────────────────────────┼──────────────┤ │ delayed-stream │ MIT │ diff --git a/NOTICE-binary b/NOTICE-binary index ef58e21f6a2..40ec15010c4 100644 --- a/NOTICE-binary +++ b/NOTICE-binary @@ -92,15 +92,6 @@ Copyright 2001-2020 The Apache Software Foundation Apache Commons Logging Copyright 2003-2013 The Apache Software Foundation -Curator Client -Copyright 2011-2017 The Apache Software Foundation - -Curator Framework -Copyright 2011-2017 The Apache Software Foundation - -Curator Recipes -Copyright 2011-2017 The Apache Software Foundation - ========================================================================= == NOTICE file corresponding to section 4(d) of the Apache License, == Version 2.0, in this case for the Apache Derby distribution. @@ -1236,7 +1227,7 @@ This product optionally depends on 'zstd-jni', a zstd-jni Java compression and decompression library, which can be obtained at: * LICENSE: - * license/LICENSE.zstd-jni.txt (Apache License 2.0) + * license/LICENSE.zstd-jni.txt (BSD License) * HOMEPAGE: * https://github.com/luben/zstd-jni @@ -1370,3 +1361,26 @@ decompression for Java., which can be obtained at: * HOMEPAGE: * https://github.com/hyperxpro/Brotli4j +This product depends on 'kafka-clients', Java clients for Kafka, +which can be obtained at: + + * LICENSE: + * license/LICENSE.kafka.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/apache/kafka + +This product optionally depends on 'snappy-java', Snappy compression and +decompression for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.snappy-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/xerial/snappy-java + +This product optionally depends on 'lz4-java', Lz4 compression and +decompression for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.lz4-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/lz4/lz4-java diff --git a/README.md b/README.md index e54f6fac00d..6b1423e7855 100644 --- a/README.md +++ b/README.md @@ -20,20 +20,20 @@

@@ -49,8 +49,6 @@ Apache Kyuubi™ is a distributed and multi-tenant gateway to provide serverless SQL on data warehouses and lakehouses. - - ## What is Kyuubi? Kyuubi provides a pure SQL gateway through Thrift JDBC/ODBC interface for end-users to manipulate large-scale data with pre-programmed and extensible Spark SQL engines. This "out-of-the-box" model minimizes the barriers and costs for end-users to use Spark at the client side. At the server-side, Kyuubi server and engines' multi-tenant architecture provides the administrators a way to achieve computing resource isolation, data security, high availability, high client concurrency, etc. @@ -84,7 +82,7 @@ HiveServer2 can identify and authenticate a caller, and then if the caller also Kyuubi extends the use of STS in a multi-tenant model based on a unified interface and relies on the concept of multi-tenancy to interact with cluster managers to finally gain the ability of resources sharing/isolation and data security. The loosely coupled architecture of the Kyuubi server and engine dramatically improves the client concurrency and service stability of the service itself. -#### DataLake/LakeHouse Support +#### DataLake/Lakehouse Support The vision of Kyuubi is to unify the portal and become an easy-to-use data lake management platform. Different kinds of workloads, such as ETL processing and BI analytics, can be supported by one platform, using one copy of data, with one SQL interface. @@ -105,11 +103,7 @@ and others would not be possible without your help. ![](./docs/imgs/kyuubi_ecosystem.drawio.png) -## Online Documentation - -Since Kyuubi 1.3.0-incubating, the Kyuubi online documentation is hosted by [https://kyuubi.apache.org/](https://kyuubi.apache.org/). -You can find the latest Kyuubi documentation on [this web page](https://kyuubi.readthedocs.io/en/master/). -For 1.2 and earlier versions, please check the [Readthedocs](https://kyuubi.readthedocs.io/en/v1.2.0/) directly. +## Online Documentation Documentation Status ## Quick Start @@ -117,9 +111,32 @@ Ready? [Getting Started](https://kyuubi.readthedocs.io/en/master/quick_start/) w ## [Contributing](./CONTRIBUTING.md) -## Contributor over time +## Project & Community Status -[![Contributor over time](https://contributor-graph-api.apiseven.com/contributors-svg?chart=contributorOverTime&repo=apache/kyuubi)](https://api7.ai/contributor-graph?chart=contributorOverTime&repo=apache/kyuubi) +

## Aside @@ -127,7 +144,3 @@ The project took its name from a character of a popular Japanese manga - `Naruto The character is named `Kyuubi Kitsune/Kurama`, which is a nine-tailed fox in mythology. `Kyuubi` spread the power and spirit of fire, which is used here to represent the powerful [Apache Spark](http://spark.apache.org). Its nine tails stand for end-to-end multi-tenancy support of this project. - -## License - -This project is licensed under the Apache 2.0 License. See the [LICENSE](./LICENSE) file for details. diff --git a/bin/docker-image-tool.sh b/bin/docker-image-tool.sh index f3efc8bf5d4..14d5fe7b09d 100755 --- a/bin/docker-image-tool.sh +++ b/bin/docker-image-tool.sh @@ -200,8 +200,7 @@ Examples: EOF } -# shellcheck disable=SC2199 -if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then +if [[ "$*" = *--help ]] || [[ "$*" = *-h ]]; then usage exit 0 fi diff --git a/bin/kyuubi b/bin/kyuubi index 09c8e9373be..9132aae39e8 100755 --- a/bin/kyuubi +++ b/bin/kyuubi @@ -30,8 +30,7 @@ function usage() { echo " -h | --help - Show this help message" } -# shellcheck disable=SC2199 -if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then +if [[ "$*" = *--help ]] || [[ "$*" = *-h ]]; then usage exit 0 fi @@ -156,7 +155,7 @@ function start_kyuubi() { function run_kyuubi() { echo "Starting $CLASS" - nice -n "${KYUUBI_NICENESS:-0}" ${cmd} + exec nice -n "${KYUUBI_NICENESS:-0}" ${cmd} } function stop_kyuubi() { diff --git a/bin/kyuubi-logo b/bin/kyuubi-logo index 15a45a4bbc0..1f95ca02e52 100755 --- a/bin/kyuubi-logo +++ b/bin/kyuubi-logo @@ -15,18 +15,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # - # Bugzilla 37848: When no TTY is available, don't output to console have_tty=0 -# shellcheck disable=SC2006 -if [[ "`tty`" != "not a tty" ]]; then +if [[ "$(tty)" != "not a tty" ]]; then have_tty=1 fi # Bugzilla 37848: When no TTY is available, don't output to console have_tty=0 -# shellcheck disable=SC2006 -if [[ "`tty`" != "not a tty" ]]; then +if [[ "$(tty)" != "not a tty" ]]; then have_tty=1 fi diff --git a/bin/kyuubi-zk-cli b/bin/kyuubi-zk-cli index 089b7ad186c..f503c3e5a5e 100755 --- a/bin/kyuubi-zk-cli +++ b/bin/kyuubi-zk-cli @@ -17,7 +17,7 @@ # ## Zookeeper Shell Client Entrance -CLASS="org.apache.zookeeper.ZooKeeperMain" +CLASS="org.apache.kyuubi.shaded.zookeeper.ZooKeeperMain" export KYUUBI_HOME="$(cd "$(dirname "$0")"/..; pwd)" diff --git a/bin/load-kyuubi-env.sh b/bin/load-kyuubi-env.sh index bfb92265869..4d6f72ddf3e 100755 --- a/bin/load-kyuubi-env.sh +++ b/bin/load-kyuubi-env.sh @@ -69,6 +69,44 @@ if [[ -z ${JAVA_HOME} ]]; then fi fi +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS -XX:+IgnoreUnrecognizedVMOptions" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS -Dio.netty.tryReflectionSetAccessible=true" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.lang=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.lang.invoke=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.lang.reflect=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.io=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.net=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.nio=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.util=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/java.util.concurrent=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS 
--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.nio.cs=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.security.action=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.security.tools.keytool=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.security.x509=ALL-UNNAMED" +KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS --add-opens=java.base/sun.util.calendar=ALL-UNNAMED" +export KYUUBI_JAVA_OPTS="$KYUUBI_JAVA_OPTS" + +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS -XX:+IgnoreUnrecognizedVMOptions" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS -Dio.netty.tryReflectionSetAccessible=true" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.lang=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.lang.invoke=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.lang.reflect=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.io=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.net=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.nio=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.util=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.util.concurrent=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.nio.cs=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.security.action=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.security.tools.keytool=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.security.x509=ALL-UNNAMED" +KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS --add-opens=java.base/sun.util.calendar=ALL-UNNAMED" +export KYUUBI_CTL_JAVA_OPTS="$KYUUBI_CTL_JAVA_OPTS" + export KYUUBI_SCALA_VERSION="${KYUUBI_SCALA_VERSION:-"2.12"}" if [[ -f ${KYUUBI_HOME}/RELEASE ]]; then diff --git a/bin/stop-application.sh b/bin/stop-application.sh index b208ab50527..000eb4cdd7c 100755 --- a/bin/stop-application.sh +++ b/bin/stop-application.sh @@ -16,8 +16,7 @@ # limitations under the License. # -# shellcheck disable=SC2071 -if [[ $# < 1 ]] ; then +if [[ $# -lt 1 ]] ; then echo "USAGE: $0 " exit 1 fi diff --git a/build/dist b/build/dist index c155dce3ff4..b81a2661ece 100755 --- a/build/dist +++ b/build/dist @@ -215,7 +215,7 @@ else echo "Making distribution for Kyuubi $VERSION in '$DISTDIR'..." 
fi -MVN_DIST_OPT="-DskipTests" +MVN_DIST_OPT="-DskipTests -Dmaven.javadoc.skip=true -Dmaven.scaladoc.skip=true -Dmaven.source.skip" if [[ "$ENABLE_WEBUI" == "true" ]]; then MVN_DIST_OPT="$MVN_DIST_OPT -Pweb-ui" @@ -256,6 +256,7 @@ mkdir -p "$DISTDIR/externals/engines/spark" mkdir -p "$DISTDIR/externals/engines/trino" mkdir -p "$DISTDIR/externals/engines/hive" mkdir -p "$DISTDIR/externals/engines/jdbc" +mkdir -p "$DISTDIR/externals/engines/chat" echo "Kyuubi $VERSION $GITREVSTRING built for" > "$DISTDIR/RELEASE" echo "Java $JAVA_VERSION" >> "$DISTDIR/RELEASE" echo "Scala $SCALA_VERSION" >> "$DISTDIR/RELEASE" @@ -313,6 +314,18 @@ for jar in $(ls "$DISTDIR/jars/"); do fi done +# Copy chat engines +cp "$KYUUBI_HOME/externals/kyuubi-chat-engine/target/kyuubi-chat-engine_${SCALA_VERSION}-${VERSION}.jar" "$DISTDIR/externals/engines/chat/" +cp -r "$KYUUBI_HOME"/externals/kyuubi-chat-engine/target/scala-$SCALA_VERSION/jars/*.jar "$DISTDIR/externals/engines/chat/" + +# Share the jars w/ server to reduce binary size +# shellcheck disable=SC2045 +for jar in $(ls "$DISTDIR/jars/"); do + if [[ -f "$DISTDIR/externals/engines/chat/$jar" ]]; then + (cd $DISTDIR/externals/engines/chat; ln -snf "../../../jars/$jar" "$DISTDIR/externals/engines/chat/$jar") + fi +done + # Copy kyuubi tools if [[ -f "$KYUUBI_HOME/tools/spark-block-cleaner/target/spark-block-cleaner_${SCALA_VERSION}-${VERSION}.jar" ]]; then mkdir -p "$DISTDIR/tools/spark-block-cleaner/kubernetes" @@ -322,7 +335,7 @@ if [[ -f "$KYUUBI_HOME/tools/spark-block-cleaner/target/spark-block-cleaner_${SC fi # Copy Kyuubi Spark extension -SPARK_EXTENSION_VERSIONS=('3-1' '3-2' '3-3') +SPARK_EXTENSION_VERSIONS=('3-1' '3-2' '3-3' '3-4') # shellcheck disable=SC2068 for SPARK_EXTENSION_VERSION in ${SPARK_EXTENSION_VERSIONS[@]}; do if [[ -f $"$KYUUBI_HOME/extensions/spark/kyuubi-extension-spark-$SPARK_EXTENSION_VERSION/target/kyuubi-extension-spark-${SPARK_EXTENSION_VERSION}_${SCALA_VERSION}-${VERSION}.jar" ]]; then @@ -371,7 +384,11 @@ if [[ "$MAKE_TGZ" == "true" ]]; then TARDIR="$KYUUBI_HOME/$TARDIR_NAME" rm -rf "$TARDIR" cp -R "$DISTDIR" "$TARDIR" - tar czf "$TARDIR_NAME.tgz" -C "$KYUUBI_HOME" "$TARDIR_NAME" + TAR="tar" + if [ "$(uname -s)" = "Darwin" ]; then + TAR="tar --no-mac-metadata --no-xattrs --no-fflags" + fi + $TAR -czf "$TARDIR_NAME.tgz" -C "$KYUUBI_HOME" "$TARDIR_NAME" rm -rf "$TARDIR" echo "The Kyuubi tarball $TARDIR_NAME.tgz is successfully generated in $KYUUBI_HOME." 
fi diff --git a/build/kyuubi-build-info.cmd b/build/kyuubi-build-info.cmd index 7717b48e4d0..d9e8e6c6a94 100755 --- a/build/kyuubi-build-info.cmd +++ b/build/kyuubi-build-info.cmd @@ -36,6 +36,7 @@ echo kyuubi_trino_version=%~9 echo user=%username% FOR /F %%i IN ('git rev-parse HEAD') DO SET "revision=%%i" +FOR /F "delims=" %%i IN ('git show -s --format^=%%ci HEAD') DO SET "revision_time=%%i" FOR /F %%i IN ('git rev-parse --abbrev-ref HEAD') DO SET "branch=%%i" FOR /F %%i IN ('git config --get remote.origin.url') DO SET "url=%%i" @@ -44,6 +45,7 @@ FOR /f %%i IN ("%TIME%") DO SET current_time=%%i set date=%current_date%_%current_time% echo revision=%revision% +echo revision_time=%revision_time% echo branch=%branch% echo date=%date% echo url=%url% diff --git a/build/mvn b/build/mvn index 67aa02b4f79..cd6c0c796d1 100755 --- a/build/mvn +++ b/build/mvn @@ -35,7 +35,7 @@ fi ## Arg2 - Tarball Name ## Arg3 - Checkable Binary install_app() { - local remote_tarball="$1/$2" + local remote_tarball="$1/$2$4" local local_tarball="${_DIR}/$2" local binary="${_DIR}/$3" @@ -77,12 +77,25 @@ install_mvn() { # See simple version normalization: http://stackoverflow.com/questions/16989598/bash-comparing-version-numbers function version { echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; } if [ $(version $MVN_DETECTED_VERSION) -ne $(version $MVN_VERSION) ]; then - local APACHE_MIRROR=${APACHE_MIRROR:-'https://archive.apache.org/dist/'} + local APACHE_MIRROR=${APACHE_MIRROR:-'https://www.apache.org/dyn/closer.lua'} + local MIRROR_URL_QUERY="?action=download" + local MVN_TARBALL="apache-maven-${MVN_VERSION}-bin.tar.gz" + local FILE_PATH="maven/maven-3/${MVN_VERSION}/binaries" + + if [ $(command -v curl) ]; then + if ! curl -L --output /dev/null --silent --head --fail "${APACHE_MIRROR}/${FILE_PATH}/${MVN_TARBALL}${MIRROR_URL_QUERY}" ; then + # Fall back to archive.apache.org for older Maven + echo "Falling back to archive.apache.org to download Maven" + APACHE_MIRROR="https://archive.apache.org/dist" + MIRROR_URL_QUERY="" + fi + fi install_app \ - "${APACHE_MIRROR}/maven/maven-3/${MVN_VERSION}/binaries" \ - "apache-maven-${MVN_VERSION}-bin.tar.gz" \ - "apache-maven-${MVN_VERSION}/bin/mvn" + "${APACHE_MIRROR}/${FILE_PATH}" \ + "${MVN_TARBALL}" \ + "apache-maven-${MVN_VERSION}/bin/mvn" \ + "${MIRROR_URL_QUERY}" MVN_BIN="${_DIR}/apache-maven-${MVN_VERSION}/bin/mvn" fi diff --git a/build/mvnd b/build/mvnd deleted file mode 100755 index 9af3429f34b..00000000000 --- a/build/mvnd +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# Determine the current working directory -_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# Preserve the calling directory -_CALLING_DIR="$(pwd)" -# Options used during compilation -_COMPILE_JVM_OPTS="-Xms2g -Xmx2g -XX:ReservedCodeCacheSize=1g -Xss128m" - -if [ "$CI" ]; then - export MAVEN_CLI_OPTS="-Dmvnd.minThreads=8 --no-transfer-progress --errors --fail-fast -Dstyle.color=always" -fi - -# Installs any application tarball given a URL, the expected tarball name, -# and, optionally, a checkable binary path to determine if the binary has -# already been installed -## Arg1 - URL -## Arg2 - Tarball Name -## Arg3 - Checkable Binary -install_app() { - local remote_tarball="$1/$2" - local local_tarball="${_DIR}/$2" - local binary="${_DIR}/$3" - - # setup `curl` and `wget` silent options if we're running on Jenkins - local curl_opts="-L" - local wget_opts="" - curl_opts="--progress-bar ${curl_opts}" - wget_opts="--progress=bar:force ${wget_opts}" - - if [ -z "$3" ] || [ ! -f "$binary" ]; then - # check if we already have the tarball - # check if we have curl installed - # download application - rm -f "$local_tarball" - [ ! -f "${local_tarball}" ] && [ "$(command -v curl)" ] && \ - echo "exec: curl ${curl_opts} ${remote_tarball}" 1>&2 && \ - curl ${curl_opts} "${remote_tarball}" > "${local_tarball}" - # if the file still doesn't exist, lets try `wget` and cross our fingers - [ ! -f "${local_tarball}" ] && [ "$(command -v wget)" ] && \ - echo "exec: wget ${wget_opts} ${remote_tarball}" 1>&2 && \ - wget ${wget_opts} -O "${local_tarball}" "${remote_tarball}" - # if both were unsuccessful, exit - [ ! -f "${local_tarball}" ] && \ - echo -n "ERROR: Cannot download $2 with cURL or wget; " && \ - echo "please install manually and try again." && \ - exit 2 - cd "${_DIR}" && tar -xzf "$2" - rm -rf "$local_tarball" - fi -} - -function get_os_type() { - local unameOsOut=$(uname -s) - local osType - case "${unameOsOut}" in - Linux*) osType=linux ;; - Darwin*) osType=darwin ;; - CYGWIN*) osType=windows ;; - MINGW*) osType=windows ;; - *) osType="UNKNOWN:${unameOsOut}" ;; - esac - echo "$osType" -} - -function get_os_arch() { - local unameArchOut="$(uname -m)" - local arch - case "${unameArchOut}" in - x86_64*) arch=amd64 ;; - arm64*) arch=aarch64 ;; - *) arch="UNKNOWN:${unameOsOut}" ;; - esac - echo "$arch" -} - -# Determine the Mvnd version from the root pom.xml file and -# install mvnd under the build/ folder if needed. -function install_mvnd() { - local MVND_VERSION=$(grep "" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}') - local MVN_VERSION=$(grep "" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}') - MVND_BIN="$(command -v mvnd)" - if [ "$MVND_BIN" ]; then - local MVND_DETECTED_VERSION="$(mvnd -v 2>&1 | grep '(mvnd)' | awk '{print $5}')" - local MVN_DETECTED_VERSION="$(mvnd -v 2>&1 | grep 'Apache Maven' | awk 'NR==2 {print $3}')" - fi - # See simple version normalization: http://stackoverflow.com/questions/16989598/bash-comparing-version-numbers - function version { echo "$@" | awk -F. 
'{ printf("%03d%03d%03d\n", $1,$2,$3); }'; } - - if [ $(version $MVND_DETECTED_VERSION) -ne $(version $MVND_VERSION) ]; then - local APACHE_MIRROR=${APACHE_MIRROR:-'https://downloads.apache.org'} - local OS_TYPE=$(get_os_type) - local ARCH=$(get_os_arch) - - install_app \ - "${APACHE_MIRROR}/maven/mvnd/${MVND_VERSION}" \ - "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \ - "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd" - - MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd" - else - if [ "$(version $MVN_DETECTED_VERSION)" -ne "$(version $MVN_VERSION)" ]; then - echo "Mvnd $MVND_DETECTED_VERSION embedded maven version $MVN_DETECTED_VERSION is not equivalent to $MVN_VERSION required in pom." - exit 1 - fi - fi -} - -install_mvnd - -cd "${_CALLING_DIR}" - -# Set any `mvn` options if not already present -export MAVEN_OPTS=${MAVEN_OPTS:-"$_COMPILE_JVM_OPTS"} - -echo "Using \`mvnd\` from path: $MVND_BIN" 1>&2 - -if [ "$MAVEN_CLI_OPTS" != "" ]; then - echo "MAVEN_CLI_OPTS=$MAVEN_CLI_OPTS" -fi - -${MVND_BIN} $MAVEN_CLI_OPTS "$@" diff --git a/build/release/release.sh b/build/release/release.sh index fefcce6a913..89ecd5230b9 100755 --- a/build/release/release.sh +++ b/build/release/release.sh @@ -52,6 +52,21 @@ if [[ ${RELEASE_VERSION} =~ .*-SNAPSHOT ]]; then exit 1 fi +if [ -n "${JAVA_HOME}" ]; then + JAVA="${JAVA_HOME}/bin/java" +elif [ "$(command -v java)" ]; then + JAVA="java" +else + echo "JAVA_HOME is not set" >&2 + exit 1 +fi + +JAVA_VERSION=$($JAVA -version 2>&1 | awk -F '"' '/version/ {print $2}') +if [[ $JAVA_VERSION != 1.8.* ]]; then + echo "Unexpected Java version: $JAVA_VERSION. Java 8 is required for release." + exit 1 +fi + RELEASE_TAG="v${RELEASE_VERSION}-rc${RELEASE_RC_NO}" SVN_STAGING_REPO="https://dist.apache.org/repos/dist/dev/kyuubi" @@ -101,6 +116,9 @@ upload_nexus_staging() { -s "${KYUUBI_DIR}/build/release/asf-settings.xml" \ -pl extensions/spark/kyuubi-extension-spark-3-2 -am ${KYUUBI_DIR}/build/mvn clean deploy -DskipTests -Papache-release,flink-provided,spark-provided,hive-provided,spark-3.3 \ + -s "${KYUUBI_DIR}/build/release/asf-settings.xml" \ + -pl extensions/spark/kyuubi-extension-spark-3-3 -am + ${KYUUBI_DIR}/build/mvn clean deploy -DskipTests -Papache-release,flink-provided,spark-provided,hive-provided,spark-3.4 \ -s "${KYUUBI_DIR}/build/release/asf-settings.xml" } diff --git a/charts/kyuubi/Chart.yaml b/charts/kyuubi/Chart.yaml index 6b377ecc5d1..56abc9edc88 100644 --- a/charts/kyuubi/Chart.yaml +++ b/charts/kyuubi/Chart.yaml @@ -20,7 +20,7 @@ name: kyuubi description: A Helm chart for Kyuubi server type: application version: 0.1.0 -appVersion: "master-snapshot" +appVersion: 1.7.3 home: https://kyuubi.apache.org icon: https://raw.githubusercontent.com/apache/kyuubi/master/docs/imgs/logo.png sources: diff --git a/charts/kyuubi/README.md b/charts/kyuubi/README.md new file mode 100644 index 00000000000..dfec578dd7b --- /dev/null +++ b/charts/kyuubi/README.md @@ -0,0 +1,57 @@ + + +# Helm Chart for Apache Kyuubi + +[Apache Kyuubi](https://kyuubi.apache.org) is a distributed and multi-tenant gateway to provide serverless SQL on Data Warehouses and Lakehouses. + + +## Introduction + +This chart will bootstrap an [Kyuubi](https://kyuubi.apache.org) deployment on a [Kubernetes](http://kubernetes.io) +cluster using the [Helm](https://helm.sh) package manager. + +## Requirements + +- Kubernetes cluster +- Helm 3.0+ + +## Template rendering + +When you want to test the template rendering, but not actually install anything. 
[Debugging templates](https://helm.sh/docs/chart_template_guide/debugging/) provide a quick way of viewing the generated content without YAML parse errors blocking. + +There are two ways to render templates. It will return the rendered template to you so you can see the output. + +- Local rendering chart templates +```shell +helm template --debug ../kyuubi +``` +- Server side rendering chart templates +```shell +helm install --dry-run --debug --generate-name ../kyuubi +``` + + +## Documentation + +Configuration guide documentation for Kyuubi lives [on the website](https://kyuubi.readthedocs.io/en/master/configuration/settings.html#kyuubi-configurations). (Not just for Helm Chart) + +## Contributing + +Want to help build Apache Kyuubi? Check out our [contributing documentation](https://kyuubi.readthedocs.io/en/master/community/CONTRIBUTING.html). \ No newline at end of file diff --git a/charts/kyuubi/templates/NOTES.txt b/charts/kyuubi/templates/NOTES.txt index 0da72d0ebb5..2693f5ef6ff 100644 --- a/charts/kyuubi/templates/NOTES.txt +++ b/charts/kyuubi/templates/NOTES.txt @@ -30,9 +30,13 @@ In order to check the release status, use: {{ $name | snakecase | upper }}: - To access {{ $.Release.Name }}-{{ $name | kebabcase }} service within the cluster, use the following URL: {{ $.Release.Name }}-{{ $name | kebabcase }}.{{ $.Release.Namespace }}.svc.cluster.local +{{- if $.Values.kyuubiConf.kyuubiDefaults }} +{{- if regexMatch "(^|\\s)kyuubi.frontend.bind.host\\s*=?\\s*(localhost|127\\.0\\.0\\.1)($|\\s)" $.Values.kyuubiConf.kyuubiDefaults }} - To access {{ $.Release.Name }}-{{ $name | kebabcase }} service from outside the cluster for debugging, run the following command: kubectl port-forward svc/{{ $.Release.Name }}-{{ $name | kebabcase }} {{ tpl $frontend.service.port $ }}:{{ tpl $frontend.service.port $ }} -n {{ $.Release.Namespace }} and use 127.0.0.1:{{ tpl $frontend.service.port $ }} +{{- end }} +{{- end }} {{- if eq $frontend.service.type "NodePort" }} - To access {{ $.Release.Name }}-{{ $name | kebabcase }} service from outside the cluster through configured NodePort, run the following commands: export NODE_PORT=$(kubectl get service {{ $.Release.Name }}-{{ $name | kebabcase }} -n {{ $.Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}") diff --git a/charts/kyuubi/templates/_helpers.tpl b/charts/kyuubi/templates/_helpers.tpl index cd4865a1288..502bf4646c1 100644 --- a/charts/kyuubi/templates/_helpers.tpl +++ b/charts/kyuubi/templates/_helpers.tpl @@ -17,17 +17,35 @@ {{/* A comma separated string of enabled frontend protocols, e.g. "REST,THRIFT_BINARY". -For details, see 'kyuubi.frontend.protocols': https://kyuubi.readthedocs.io/en/master/deployment/settings.html#frontend +For details, see 'kyuubi.frontend.protocols': https://kyuubi.readthedocs.io/en/master/configuration/settings.html#frontend */}} {{- define "kyuubi.frontend.protocols" -}} -{{- $protocols := list }} -{{- range $name, $frontend := .Values.server }} - {{- if $frontend.enabled }} - {{- $protocols = $name | snakecase | upper | append $protocols }} + {{- $protocols := list }} + {{- range $name, $frontend := .Values.server }} + {{- if $frontend.enabled }} + {{- $protocols = $name | snakecase | upper | append $protocols }} + {{- end }} {{- end }} + {{- if not $protocols }} + {{ fail "At least one frontend protocol must be enabled!" }} + {{- end }} + {{- $protocols | join "," }} {{- end }} -{{- if not $protocols }} - {{ fail "At least one frontend protocol must be enabled!" 
}} -{{- end }} -{{- $protocols | join "," }} -{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kyuubi.selectorLabels" -}} +app.kubernetes.io/name: {{ .Chart.Name }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "kyuubi.labels" -}} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} +{{ include "kyuubi.selectorLabels" . }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/charts/kyuubi/templates/kyuubi-alert.yaml b/charts/kyuubi/templates/kyuubi-alert.yaml new file mode 100644 index 00000000000..8637e9e0395 --- /dev/null +++ b/charts/kyuubi/templates/kyuubi-alert.yaml @@ -0,0 +1,28 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +{{- if and .Values.server.prometheus.enabled (eq .Values.metricsReporters "PROMETHEUS") .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ .Release.Name }} + labels: + {{- include "kyuubi.labels" . | nindent 4 }} +spec: + groups: + {{- toYaml .Values.prometheusRule.groups | nindent 4 }} +{{- end }} diff --git a/charts/kyuubi/templates/kyuubi-configmap.yaml b/charts/kyuubi/templates/kyuubi-configmap.yaml index 7a96daaf7a8..1e5e195d399 100644 --- a/charts/kyuubi/templates/kyuubi-configmap.yaml +++ b/charts/kyuubi/templates/kyuubi-configmap.yaml @@ -1,30 +1,26 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} apiVersion: v1 kind: ConfigMap metadata: name: {{ .Release.Name }} labels: - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- include "kyuubi.labels" . | nindent 4 }} data: {{- with .Values.kyuubiConf.kyuubiEnv }} kyuubi-env.sh: | @@ -34,16 +30,20 @@ data: kyuubi-defaults.conf: | ## Helm chart provided Kyuubi configurations kyuubi.kubernetes.namespace={{ .Release.Namespace }} - kyuubi.frontend.bind.host=localhost + kyuubi.frontend.connection.url.use.hostname=false kyuubi.frontend.thrift.binary.bind.port={{ .Values.server.thriftBinary.port }} kyuubi.frontend.thrift.http.bind.port={{ .Values.server.thriftHttp.port }} kyuubi.frontend.rest.bind.port={{ .Values.server.rest.port }} kyuubi.frontend.mysql.bind.port={{ .Values.server.mysql.port }} kyuubi.frontend.protocols={{ include "kyuubi.frontend.protocols" . }} + # Kyuubi Metrics + kyuubi.metrics.enabled={{ .Values.server.prometheus.enabled }} + kyuubi.metrics.reporters={{ .Values.metricsReporters }} + ## User provided Kyuubi configurations {{- with .Values.kyuubiConf.kyuubiDefaults }} - {{- tpl . $ | nindent 4 }} + {{- tpl . $ | nindent 4 }} {{- end }} {{- with .Values.kyuubiConf.log4j2 }} log4j2.xml: | diff --git a/charts/kyuubi/templates/kyuubi-headless-service.yaml b/charts/kyuubi/templates/kyuubi-headless-service.yaml new file mode 100644 index 00000000000..895859bac2c --- /dev/null +++ b/charts/kyuubi/templates/kyuubi-headless-service.yaml @@ -0,0 +1,35 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-headless + labels: + {{- include "kyuubi.labels" $ | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + ports: + {{- range $name, $frontend := .Values.server }} + - name: {{ $name | kebabcase }} + port: {{ tpl $frontend.service.port $ }} + targetPort: {{ $frontend.port }} + {{- end }} + selector: + {{- include "kyuubi.selectorLabels" $ | nindent 4 }} + diff --git a/charts/kyuubi/templates/kyuubi-podmonitor.yaml b/charts/kyuubi/templates/kyuubi-podmonitor.yaml new file mode 100644 index 00000000000..ea0f762141a --- /dev/null +++ b/charts/kyuubi/templates/kyuubi-podmonitor.yaml @@ -0,0 +1,31 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +{{- if and .Values.server.prometheus.enabled (eq .Values.metricsReporters "PROMETHEUS") .Values.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ .Release.Name }} + labels: + {{- include "kyuubi.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: {{ .Release.Name }} + podMetricsEndpoints: + {{- toYaml .Values.podMonitor.podMetricsEndpoint | nindent 4 }} +{{- end }} diff --git a/charts/kyuubi/templates/kyuubi-priorityclass.yaml b/charts/kyuubi/templates/kyuubi-priorityclass.yaml new file mode 100644 index 00000000000..c756108aeeb --- /dev/null +++ b/charts/kyuubi/templates/kyuubi-priorityclass.yaml @@ -0,0 +1,26 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +{{- if .Values.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ .Values.priorityClass.name | default .Release.Name }} + labels: + {{- include "kyuubi.labels" . 
| nindent 4 }} +value: {{ .Values.priorityClass.value }} +{{- end }} diff --git a/charts/kyuubi/templates/kyuubi-role.yaml b/charts/kyuubi/templates/kyuubi-role.yaml index fcb5a9f6e4f..5ee8c1dff5a 100644 --- a/charts/kyuubi/templates/kyuubi-role.yaml +++ b/charts/kyuubi/templates/kyuubi-role.yaml @@ -1,19 +1,19 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 @@ -21,10 +21,6 @@ kind: Role metadata: name: {{ .Release.Name }} labels: - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- include "kyuubi.labels" . | nindent 4 }} rules: {{- toYaml .Values.rbac.rules | nindent 2 }} {{- end }} diff --git a/charts/kyuubi/templates/kyuubi-rolebinding.yaml b/charts/kyuubi/templates/kyuubi-rolebinding.yaml index 8f74efc2dba..0f9dbd049c0 100644 --- a/charts/kyuubi/templates/kyuubi-rolebinding.yaml +++ b/charts/kyuubi/templates/kyuubi-rolebinding.yaml @@ -1,19 +1,19 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 @@ -21,11 +21,7 @@ kind: RoleBinding metadata: name: {{ .Release.Name }} labels: - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- include "kyuubi.labels" . | nindent 4 }} subjects: - kind: ServiceAccount name: {{ .Values.serviceAccount.name | default .Release.Name }} diff --git a/charts/kyuubi/templates/kyuubi-service.yaml b/charts/kyuubi/templates/kyuubi-service.yaml index 963f1fcc709..64c8b06ac20 100644 --- a/charts/kyuubi/templates/kyuubi-service.yaml +++ b/charts/kyuubi/templates/kyuubi-service.yaml @@ -1,19 +1,19 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} {{- range $name, $frontend := .Values.server }} {{- if $frontend.enabled }} @@ -22,14 +22,9 @@ kind: Service metadata: name: {{ $.Release.Name }}-{{ $name | kebabcase }} labels: - helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} - app.kubernetes.io/name: {{ $.Chart.Name }} - app.kubernetes.io/instance: {{ $.Release.Name }} - app.kubernetes.io/version: {{ $.Values.image.tag | default $.Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ $.Release.Service }} + {{- include "kyuubi.labels" $ | nindent 4 }} {{- with $frontend.service.annotations }} - annotations: - {{- toYaml . | nindent 4 }} + annotations: {{- toYaml . | nindent 4 }} {{- end }} spec: type: {{ $frontend.service.type }} @@ -41,8 +36,7 @@ spec: nodePort: {{ $frontend.service.nodePort }} {{- end }} selector: - app.kubernetes.io/name: {{ $.Chart.Name }} - app.kubernetes.io/instance: {{ $.Release.Name }} + {{- include "kyuubi.selectorLabels" $ | nindent 4 }} --- {{- end }} {{- end }} diff --git a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml index 770d5013669..a8e282a1fba 100644 --- a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml +++ b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml @@ -1,19 +1,19 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} {{- if .Values.serviceAccount.create }} apiVersion: v1 @@ -21,9 +21,5 @@ kind: ServiceAccount metadata: name: {{ .Values.serviceAccount.name | default .Release.Name }} labels: - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- include "kyuubi.labels" . 
| nindent 4 }} {{- end }} diff --git a/charts/kyuubi/templates/kyuubi-servicemonitor.yaml b/charts/kyuubi/templates/kyuubi-servicemonitor.yaml new file mode 100644 index 00000000000..7d997fc1199 --- /dev/null +++ b/charts/kyuubi/templates/kyuubi-servicemonitor.yaml @@ -0,0 +1,31 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} + +{{- if and .Values.server.prometheus.enabled (eq .Values.metricsReporters "PROMETHEUS") .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ .Release.Name }} + labels: + {{- include "kyuubi.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: {{ .Release.Name }} + endpoints: + {{- toYaml .Values.serviceMonitor.endpoints | nindent 4 }} +{{- end }} diff --git a/charts/kyuubi/templates/kyuubi-deployment.yaml b/charts/kyuubi/templates/kyuubi-statefulset.yaml similarity index 51% rename from charts/kyuubi/templates/kyuubi-deployment.yaml rename to charts/kyuubi/templates/kyuubi-statefulset.yaml index 998a877769b..626796a78d6 100644 --- a/charts/kyuubi/templates/kyuubi-deployment.yaml +++ b/charts/kyuubi/templates/kyuubi-statefulset.yaml @@ -1,48 +1,54 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +{{/* + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} apiVersion: apps/v1 -kind: Deployment +kind: StatefulSet metadata: name: {{ .Release.Name }} labels: - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} - app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- include "kyuubi.labels" . | nindent 4 }} spec: - replicas: {{ .Values.replicaCount }} selector: matchLabels: - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} + {{- include "kyuubi.selectorLabels" . | nindent 6 }} + serviceName: {{ .Release.Name }}-headless + minReadySeconds: {{ .Values.minReadySeconds }} + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + {{- with .Values.updateStrategy }} + updateStrategy: {{- toYaml . | nindent 4 }} + {{- end }} template: metadata: labels: - app.kubernetes.io/name: {{ .Chart.Name }} - app.kubernetes.io/instance: {{ .Release.Name }} + {{- include "kyuubi.selectorLabels" . | nindent 8 }} annotations: checksum/conf: {{ include (print $.Template.BasePath "/kyuubi-configmap.yaml") . | sha256sum }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} + {{- if or .Values.serviceAccount.name .Values.serviceAccount.create }} serviceAccountName: {{ .Values.serviceAccount.name | default .Release.Name }} + {{- end }} + {{- if or .Values.priorityClass.name .Values.priorityClass.create }} + priorityClassName: {{ .Values.priorityClass.name | default .Release.Name }} + {{- end }} {{- with .Values.initContainers }} initContainers: {{- tpl (toYaml .) $ | nindent 8 }} {{- end }} @@ -50,6 +56,12 @@ spec: - name: kyuubi-server image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.command }} + command: {{- tpl (toYaml .) $ | nindent 12 }} + {{- end }} + {{- with .Values.args }} + args: {{- tpl (toYaml .) $ | nindent 12 }} + {{- end }} {{- with .Values.env }} env: {{- tpl (toYaml .) 
$ | nindent 12 }} {{- end }} @@ -63,28 +75,28 @@ spec: containerPort: {{ $frontend.port }} {{- end }} {{- end }} - {{- if .Values.probe.liveness.enabled }} + {{- if .Values.livenessProbe.enabled }} livenessProbe: exec: command: ["/bin/bash", "-c", "bin/kyuubi status"] - initialDelaySeconds: {{ .Values.probe.liveness.initialDelaySeconds }} - periodSeconds: {{ .Values.probe.liveness.periodSeconds }} - timeoutSeconds: {{ .Values.probe.liveness.timeoutSeconds }} - failureThreshold: {{ .Values.probe.liveness.failureThreshold }} - successThreshold: {{ .Values.probe.liveness.successThreshold }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} {{- end }} - {{- if .Values.probe.readiness.enabled }} + {{- if .Values.readinessProbe.enabled }} readinessProbe: exec: command: ["/bin/bash", "-c", "$KYUUBI_HOME/bin/kyuubi status"] - initialDelaySeconds: {{ .Values.probe.readiness.initialDelaySeconds }} - periodSeconds: {{ .Values.probe.readiness.periodSeconds }} - timeoutSeconds: {{ .Values.probe.readiness.timeoutSeconds }} - failureThreshold: {{ .Values.probe.readiness.failureThreshold }} - successThreshold: {{ .Values.probe.readiness.successThreshold }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} {{- end }} {{- with .Values.resources }} - resources: {{- toYaml . | nindent 12 }} + resources: {{- toYaml . | nindent 12 }} {{- end }} volumeMounts: - name: conf diff --git a/charts/kyuubi/values.yaml b/charts/kyuubi/values.yaml index ddd16a9b7d4..cfc79fae5be 100644 --- a/charts/kyuubi/values.yaml +++ b/charts/kyuubi/values.yaml @@ -22,41 +22,59 @@ # Kyuubi server numbers replicaCount: 2 +# controls how Kyuubi server pods are created during initial scale up, +# when replacing pods on nodes, or when scaling down. +# The default policy is `OrderedReady`, alternative policy is `Parallel`. +podManagementPolicy: OrderedReady + +# Minimum number of seconds for which a newly created kyuubi server +# should be ready without any of its container crashing for it to be considered available. +minReadySeconds: 30 + +# maximum number of revisions that will be maintained in the StatefulSet's revision history. +revisionHistoryLimit: 10 + +# indicates the StatefulSetUpdateStrategy that will be employed to update Kyuubi server Pods in the StatefulSet +# when a revision is made to Template. 
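+# In addition to the `RollingUpdate` default shown below, Kubernetes also supports
+# `type: OnDelete`, where pods only pick up a new revision after being deleted manually.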
+updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + partition: 0 + image: repository: apache/kyuubi - pullPolicy: Always + pullPolicy: IfNotPresent tag: ~ imagePullSecrets: [] -# ServiceAccount used for Kyuubi create/list/delete pod in kubernetes +# ServiceAccount used for Kyuubi create/list/delete pod in Kubernetes serviceAccount: + # Specifies whether a ServiceAccount should be created create: true + # Specifies ServiceAccount name to be used (created if `create: true`) + name: ~ + +# priorityClass used for Kyuubi server pod +priorityClass: + # Specifies whether a priorityClass should be created + create: false + # Specifies priorityClass name to be used (created if `create: true`) name: ~ + # half of system-cluster-critical by default + value: 1000000000 +# Role-based access control rbac: + # Specifies whether RBAC resources should be created create: true + # RBAC rules rules: - apiGroups: [""] resources: ["pods"] verbs: ["create", "list", "delete"] -probe: - liveness: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 2 - failureThreshold: 10 - successThreshold: 1 - readiness: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 2 - failureThreshold: 10 - successThreshold: 1 - server: # Thrift Binary protocol (HiveServer2 compatible) thriftBinary: @@ -80,7 +98,7 @@ server: # REST API protocol (experimental) rest: - enabled: false + enabled: true port: 10099 service: type: ClusterIP @@ -98,22 +116,53 @@ server: nodePort: ~ annotations: {} + # Exposes metrics in Prometheus format + prometheus: + enabled: true + port: 10019 + service: + type: ClusterIP + port: "{{ .Values.server.prometheus.port }}" + nodePort: ~ + annotations: {} + +# $KYUUBI_CONF_DIR directory kyuubiConfDir: /opt/kyuubi/conf +# Kyuubi configurations files kyuubiConf: # The value (templated string) is used for kyuubi-env.sh file - # See https://kyuubi.apache.org/docs/latest/deployment/settings.html#environments for more details + # See example at conf/kyuubi-env.sh.template and https://kyuubi.readthedocs.io/en/master/configuration/settings.html#environments for more details kyuubiEnv: ~ + # kyuubiEnv: | + # export JAVA_HOME=/usr/jdk64/jdk1.8.0_152 + # export SPARK_HOME=/opt/spark + # export FLINK_HOME=/opt/flink + # export HIVE_HOME=/opt/hive # The value (templated string) is used for kyuubi-defaults.conf file - # See https://kyuubi.apache.org/docs/latest/deployment/settings.html#kyuubi-configurations for more details + # See https://kyuubi.readthedocs.io/en/master/configuration/settings.html#kyuubi-configurations for more details kyuubiDefaults: ~ + # kyuubiDefaults: | + # kyuubi.authentication=NONE + # kyuubi.frontend.bind.host=10.0.0.1 + # kyuubi.engine.type=SPARK_SQL + # kyuubi.engine.share.level=USER + # kyuubi.session.engine.initialize.timeout=PT3M + # kyuubi.ha.addresses=zk1:2181,zk2:2181,zk3:2181 + # kyuubi.ha.namespace=kyuubi # The value (templated string) is used for log4j2.xml file - # See https://kyuubi.apache.org/docs/latest/deployment/settings.html#logging for more details + # See example at conf/log4j2.xml.template https://kyuubi.readthedocs.io/en/master/configuration/settings.html#logging for more details log4j2: ~ +# Command to launch Kyuubi server (templated) +command: ~ +# Arguments to launch Kyuubi server (templated) +args: ~ + # Environment variables (templated) env: [] +# Environment variables from ConfigMaps and Secrets (templated) envFrom: [] # Additional volumes for Kyuubi pod (templated) @@ -126,21 +175,67 @@ 
initContainers: [] # Additional containers for Kyuubi pod (templated) containers: [] +# Resource requests and limits for Kyuubi pods resources: {} - # Used to specify resource, default unlimited. - # If you do want to specify resources: - # 1. remove the curly braces after 'resources:' - # 2. uncomment the following lines - # limits: - # cpu: 4 - # memory: 10Gi - # requests: - # cpu: 2 - # memory: 4Gi - -# Constrain Kyuubi server pods to specific nodes +# resources: +# requests: +# cpu: 2 +# memory: 4Gi +# limits: +# cpu: 4 +# memory: 10Gi + +# Liveness probe +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 10 + successThreshold: 1 + +# Readiness probe +readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 10 + successThreshold: 1 + +# Constrain Kyuubi pods to nodes with specific node labels nodeSelector: {} +# Allow to schedule Kyuubi pods on nodes with matching taints tolerations: [] +# Constrain Kyuubi pods to nodes by complex affinity/anti-affinity rules affinity: {} +# Kyuubi pods security context securityContext: {} + +# Monitoring Kyuubi - Server Metrics +# PROMETHEUS - PrometheusReporter which exposes metrics in Prometheus format +metricsReporters: ~ + +# Prometheus pod monitor +podMonitor: + # If enabled, podMonitor for operator's pod will be created + enabled: false + # The podMetricsEndpoint contains metrics information such as port, interval, scheme, and possibly other relevant details. + # This information is used to configure the endpoint from which Prometheus can scrape and collect metrics for a specific Pod in Kubernetes. + podMetricsEndpoint: [] + +# Prometheus service monitor +serviceMonitor: + # If enabled, ServiceMonitor resources for Prometheus Operator are created + enabled: false + # The endpoints section in a ServiceMonitor specifies the metrics information for each target endpoint. + # This allows you to collect metrics from multiple Services across your Kubernetes cluster in a standardized and automated way. 
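+  # A minimal sketch of an endpoints entry, assuming the Prometheus Operator CRDs are
+  # installed and the chart's Prometheus frontend service port is named "prometheus";
+  # adjust the port name, interval, and path to match your setup:
+  # endpoints:
+  #   - port: prometheus
+  #     interval: 30s
+  #     path: /metrics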
+ endpoints: [] + +# Rules for the Prometheus Operator +prometheusRule: + # If enabled, a PrometheusRule resource for Prometheus Operator is created + enabled: false + # Contents of Prometheus rules file + groups: [] diff --git a/conf/kyuubi-defaults.conf.template b/conf/kyuubi-defaults.conf.template index 6522e32e433..eef36ad10c3 100644 --- a/conf/kyuubi-defaults.conf.template +++ b/conf/kyuubi-defaults.conf.template @@ -21,8 +21,9 @@ # kyuubi.authentication NONE # # kyuubi.frontend.bind.host 10.0.0.1 -# kyuubi.frontend.protocols THRIFT_BINARY +# kyuubi.frontend.protocols THRIFT_BINARY,REST # kyuubi.frontend.thrift.binary.bind.port 10009 +# kyuubi.frontend.rest.bind.port 10099 # # kyuubi.engine.type SPARK_SQL # kyuubi.engine.share.level USER @@ -32,4 +33,4 @@ # kyuubi.ha.namespace kyuubi # -# Details in https://kyuubi.readthedocs.io/en/master/deployment/settings.html +# Details in https://kyuubi.readthedocs.io/en/master/configuration/settings.html diff --git a/conf/log4j2.xml.template b/conf/log4j2.xml.template index 37fc8acf036..86f9459a11e 100644 --- a/conf/log4j2.xml.template +++ b/conf/log4j2.xml.template @@ -24,6 +24,10 @@ rest-audit.log rest-audit-%d{yyyy-MM-dd}-%i.log + + k8s-audit.log + k8s-audit-%d{yyyy-MM-dd}-%i.log + @@ -39,6 +43,14 @@ + + + + + + + @@ -58,5 +70,8 @@ + + + diff --git a/dev/dependencyList b/dev/dependencyList index 7932c5cdf6b..0675f56f04a 100644 --- a/dev/dependencyList +++ b/dev/dependencyList @@ -22,34 +22,34 @@ annotations/4.1.1.4//annotations-4.1.1.4.jar antlr-runtime/3.5.3//antlr-runtime-3.5.3.jar antlr4-runtime/4.9.3//antlr4-runtime-4.9.3.jar aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar -automaton/1.11-8//automaton-1.11-8.jar +arrow-format/12.0.0//arrow-format-12.0.0.jar +arrow-memory-core/12.0.0//arrow-memory-core-12.0.0.jar +arrow-memory-netty/12.0.0//arrow-memory-netty-12.0.0.jar +arrow-vector/12.0.0//arrow-vector-12.0.0.jar classgraph/4.8.138//classgraph-4.8.138.jar commons-codec/1.15//commons-codec-1.15.jar commons-collections/3.2.2//commons-collections-3.2.2.jar commons-lang/2.6//commons-lang-2.6.jar -commons-lang3/3.12.0//commons-lang3-3.12.0.jar +commons-lang3/3.13.0//commons-lang3-3.13.0.jar commons-logging/1.1.3//commons-logging-1.1.3.jar -curator-client/2.12.0//curator-client-2.12.0.jar -curator-framework/2.12.0//curator-framework-2.12.0.jar -curator-recipes/2.12.0//curator-recipes-2.12.0.jar derby/10.14.2.0//derby-10.14.2.0.jar error_prone_annotations/2.14.0//error_prone_annotations-2.14.0.jar failsafe/2.4.4//failsafe-2.4.4.jar failureaccess/1.0.1//failureaccess-1.0.1.jar +flatbuffers-java/1.12.0//flatbuffers-java-1.12.0.jar fliptables/1.0.2//fliptables-1.0.2.jar -generex/1.0.2//generex-1.0.2.jar -grpc-api/1.48.0//grpc-api-1.48.0.jar -grpc-context/1.48.0//grpc-context-1.48.0.jar -grpc-core/1.48.0//grpc-core-1.48.0.jar -grpc-grpclb/1.48.0//grpc-grpclb-1.48.0.jar -grpc-netty/1.48.0//grpc-netty-1.48.0.jar -grpc-protobuf-lite/1.48.0//grpc-protobuf-lite-1.48.0.jar -grpc-protobuf/1.48.0//grpc-protobuf-1.48.0.jar -grpc-stub/1.48.0//grpc-stub-1.48.0.jar +grpc-api/1.53.0//grpc-api-1.53.0.jar +grpc-context/1.53.0//grpc-context-1.53.0.jar +grpc-core/1.53.0//grpc-core-1.53.0.jar +grpc-grpclb/1.53.0//grpc-grpclb-1.53.0.jar +grpc-netty/1.53.0//grpc-netty-1.53.0.jar +grpc-protobuf-lite/1.53.0//grpc-protobuf-lite-1.53.0.jar +grpc-protobuf/1.53.0//grpc-protobuf-1.53.0.jar +grpc-stub/1.53.0//grpc-stub-1.53.0.jar gson/2.9.0//gson-2.9.0.jar -guava/31.1-jre//guava-31.1-jre.jar -hadoop-client-api/3.3.4//hadoop-client-api-3.3.4.jar 
-hadoop-client-runtime/3.3.4//hadoop-client-runtime-3.3.4.jar +guava/32.0.1-jre//guava-32.0.1-jre.jar +hadoop-client-api/3.3.6//hadoop-client-api-3.3.6.jar +hadoop-client-runtime/3.3.6//hadoop-client-runtime-3.3.6.jar hive-common/3.1.3//hive-common-3.1.3.jar hive-metastore/3.1.3//hive-metastore-3.1.3.jar hive-serde/3.1.3//hive-serde-3.1.3.jar @@ -65,16 +65,16 @@ httpclient/4.5.14//httpclient-4.5.14.jar httpcore/4.4.16//httpcore-4.4.16.jar httpmime/4.5.14//httpmime-4.5.14.jar j2objc-annotations/1.3//j2objc-annotations-1.3.jar -jackson-annotations/2.14.2//jackson-annotations-2.14.2.jar -jackson-core/2.14.2//jackson-core-2.14.2.jar -jackson-databind/2.14.2//jackson-databind-2.14.2.jar -jackson-dataformat-yaml/2.14.2//jackson-dataformat-yaml-2.14.2.jar -jackson-datatype-jdk8/2.14.2//jackson-datatype-jdk8-2.14.2.jar -jackson-datatype-jsr310/2.14.2//jackson-datatype-jsr310-2.14.2.jar -jackson-jaxrs-base/2.14.2//jackson-jaxrs-base-2.14.2.jar -jackson-jaxrs-json-provider/2.14.2//jackson-jaxrs-json-provider-2.14.2.jar -jackson-module-jaxb-annotations/2.14.2//jackson-module-jaxb-annotations-2.14.2.jar -jackson-module-scala_2.12/2.14.2//jackson-module-scala_2.12-2.14.2.jar +jackson-annotations/2.15.0//jackson-annotations-2.15.0.jar +jackson-core/2.15.0//jackson-core-2.15.0.jar +jackson-databind/2.15.0//jackson-databind-2.15.0.jar +jackson-dataformat-yaml/2.15.0//jackson-dataformat-yaml-2.15.0.jar +jackson-datatype-jdk8/2.15.0//jackson-datatype-jdk8-2.15.0.jar +jackson-datatype-jsr310/2.15.0//jackson-datatype-jsr310-2.15.0.jar +jackson-jaxrs-base/2.15.0//jackson-jaxrs-base-2.15.0.jar +jackson-jaxrs-json-provider/2.15.0//jackson-jaxrs-json-provider-2.15.0.jar +jackson-module-jaxb-annotations/2.15.0//jackson-module-jaxb-annotations-2.15.0.jar +jackson-module-scala_2.12/2.15.0//jackson-module-scala_2.12-2.15.0.jar jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar jakarta.servlet-api/4.0.4//jakarta.servlet-api-4.0.4.jar @@ -83,77 +83,85 @@ jakarta.ws.rs-api/2.1.6//jakarta.ws.rs-api-2.1.6.jar jakarta.xml.bind-api/2.3.2//jakarta.xml.bind-api-2.3.2.jar javassist/3.25.0-GA//javassist-3.25.0-GA.jar jcl-over-slf4j/1.7.36//jcl-over-slf4j-1.7.36.jar -jersey-client/2.39//jersey-client-2.39.jar -jersey-common/2.39//jersey-common-2.39.jar -jersey-container-servlet-core/2.39//jersey-container-servlet-core-2.39.jar -jersey-entity-filtering/2.39//jersey-entity-filtering-2.39.jar -jersey-hk2/2.39//jersey-hk2-2.39.jar -jersey-media-json-jackson/2.39//jersey-media-json-jackson-2.39.jar -jersey-media-multipart/2.39//jersey-media-multipart-2.39.jar -jersey-server/2.39//jersey-server-2.39.jar +jersey-client/2.39.1//jersey-client-2.39.1.jar +jersey-common/2.39.1//jersey-common-2.39.1.jar +jersey-container-servlet-core/2.39.1//jersey-container-servlet-core-2.39.1.jar +jersey-entity-filtering/2.39.1//jersey-entity-filtering-2.39.1.jar +jersey-hk2/2.39.1//jersey-hk2-2.39.1.jar +jersey-media-json-jackson/2.39.1//jersey-media-json-jackson-2.39.1.jar +jersey-media-multipart/2.39.1//jersey-media-multipart-2.39.1.jar +jersey-server/2.39.1//jersey-server-2.39.1.jar jetcd-api/0.7.3//jetcd-api-0.7.3.jar jetcd-common/0.7.3//jetcd-common-0.7.3.jar jetcd-core/0.7.3//jetcd-core-0.7.3.jar jetcd-grpc/0.7.3//jetcd-grpc-0.7.3.jar -jetty-http/9.4.50.v20221201//jetty-http-9.4.50.v20221201.jar -jetty-io/9.4.50.v20221201//jetty-io-9.4.50.v20221201.jar -jetty-security/9.4.50.v20221201//jetty-security-9.4.50.v20221201.jar -jetty-server/9.4.50.v20221201//jetty-server-9.4.50.v20221201.jar 
-jetty-servlet/9.4.50.v20221201//jetty-servlet-9.4.50.v20221201.jar -jetty-util-ajax/9.4.50.v20221201//jetty-util-ajax-9.4.50.v20221201.jar -jetty-util/9.4.50.v20221201//jetty-util-9.4.50.v20221201.jar +jetty-client/9.4.52.v20230823//jetty-client-9.4.52.v20230823.jar +jetty-http/9.4.52.v20230823//jetty-http-9.4.52.v20230823.jar +jetty-io/9.4.52.v20230823//jetty-io-9.4.52.v20230823.jar +jetty-proxy/9.4.52.v20230823//jetty-proxy-9.4.52.v20230823.jar +jetty-security/9.4.52.v20230823//jetty-security-9.4.52.v20230823.jar +jetty-server/9.4.52.v20230823//jetty-server-9.4.52.v20230823.jar +jetty-servlet/9.4.52.v20230823//jetty-servlet-9.4.52.v20230823.jar +jetty-util-ajax/9.4.52.v20230823//jetty-util-ajax-9.4.52.v20230823.jar +jetty-util/9.4.52.v20230823//jetty-util-9.4.52.v20230823.jar jline/0.9.94//jline-0.9.94.jar jul-to-slf4j/1.7.36//jul-to-slf4j-1.7.36.jar -kubernetes-client/5.12.1//kubernetes-client-5.12.1.jar -kubernetes-model-admissionregistration/5.12.1//kubernetes-model-admissionregistration-5.12.1.jar -kubernetes-model-apiextensions/5.12.1//kubernetes-model-apiextensions-5.12.1.jar -kubernetes-model-apps/5.12.1//kubernetes-model-apps-5.12.1.jar -kubernetes-model-autoscaling/5.12.1//kubernetes-model-autoscaling-5.12.1.jar -kubernetes-model-batch/5.12.1//kubernetes-model-batch-5.12.1.jar -kubernetes-model-certificates/5.12.1//kubernetes-model-certificates-5.12.1.jar -kubernetes-model-common/5.12.1//kubernetes-model-common-5.12.1.jar -kubernetes-model-coordination/5.12.1//kubernetes-model-coordination-5.12.1.jar -kubernetes-model-core/5.12.1//kubernetes-model-core-5.12.1.jar -kubernetes-model-discovery/5.12.1//kubernetes-model-discovery-5.12.1.jar -kubernetes-model-events/5.12.1//kubernetes-model-events-5.12.1.jar -kubernetes-model-extensions/5.12.1//kubernetes-model-extensions-5.12.1.jar -kubernetes-model-flowcontrol/5.12.1//kubernetes-model-flowcontrol-5.12.1.jar -kubernetes-model-metrics/5.12.1//kubernetes-model-metrics-5.12.1.jar -kubernetes-model-networking/5.12.1//kubernetes-model-networking-5.12.1.jar -kubernetes-model-node/5.12.1//kubernetes-model-node-5.12.1.jar -kubernetes-model-policy/5.12.1//kubernetes-model-policy-5.12.1.jar -kubernetes-model-rbac/5.12.1//kubernetes-model-rbac-5.12.1.jar -kubernetes-model-scheduling/5.12.1//kubernetes-model-scheduling-5.12.1.jar -kubernetes-model-storageclass/5.12.1//kubernetes-model-storageclass-5.12.1.jar +kafka-clients/3.4.0//kafka-clients-3.4.0.jar +kubernetes-client-api/6.8.1//kubernetes-client-api-6.8.1.jar +kubernetes-client/6.8.1//kubernetes-client-6.8.1.jar +kubernetes-httpclient-okhttp/6.8.1//kubernetes-httpclient-okhttp-6.8.1.jar +kubernetes-model-admissionregistration/6.8.1//kubernetes-model-admissionregistration-6.8.1.jar +kubernetes-model-apiextensions/6.8.1//kubernetes-model-apiextensions-6.8.1.jar +kubernetes-model-apps/6.8.1//kubernetes-model-apps-6.8.1.jar +kubernetes-model-autoscaling/6.8.1//kubernetes-model-autoscaling-6.8.1.jar +kubernetes-model-batch/6.8.1//kubernetes-model-batch-6.8.1.jar +kubernetes-model-certificates/6.8.1//kubernetes-model-certificates-6.8.1.jar +kubernetes-model-common/6.8.1//kubernetes-model-common-6.8.1.jar +kubernetes-model-coordination/6.8.1//kubernetes-model-coordination-6.8.1.jar +kubernetes-model-core/6.8.1//kubernetes-model-core-6.8.1.jar +kubernetes-model-discovery/6.8.1//kubernetes-model-discovery-6.8.1.jar +kubernetes-model-events/6.8.1//kubernetes-model-events-6.8.1.jar +kubernetes-model-extensions/6.8.1//kubernetes-model-extensions-6.8.1.jar 
+kubernetes-model-flowcontrol/6.8.1//kubernetes-model-flowcontrol-6.8.1.jar +kubernetes-model-gatewayapi/6.8.1//kubernetes-model-gatewayapi-6.8.1.jar +kubernetes-model-metrics/6.8.1//kubernetes-model-metrics-6.8.1.jar +kubernetes-model-networking/6.8.1//kubernetes-model-networking-6.8.1.jar +kubernetes-model-node/6.8.1//kubernetes-model-node-6.8.1.jar +kubernetes-model-policy/6.8.1//kubernetes-model-policy-6.8.1.jar +kubernetes-model-rbac/6.8.1//kubernetes-model-rbac-6.8.1.jar +kubernetes-model-resource/6.8.1//kubernetes-model-resource-6.8.1.jar +kubernetes-model-scheduling/6.8.1//kubernetes-model-scheduling-6.8.1.jar +kubernetes-model-storageclass/6.8.1//kubernetes-model-storageclass-6.8.1.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.9.3//libthrift-0.9.3.jar -log4j-1.2-api/2.19.0//log4j-1.2-api-2.19.0.jar -log4j-api/2.19.0//log4j-api-2.19.0.jar -log4j-core/2.19.0//log4j-core-2.19.0.jar -log4j-slf4j-impl/2.19.0//log4j-slf4j-impl-2.19.0.jar +log4j-1.2-api/2.20.0//log4j-1.2-api-2.20.0.jar +log4j-api/2.20.0//log4j-api-2.20.0.jar +log4j-core/2.20.0//log4j-core-2.20.0.jar +log4j-slf4j-impl/2.20.0//log4j-slf4j-impl-2.20.0.jar logging-interceptor/3.12.12//logging-interceptor-3.12.12.jar +lz4-java/1.8.0//lz4-java-1.8.0.jar metrics-core/4.2.8//metrics-core-4.2.8.jar metrics-jmx/4.2.8//metrics-jmx-4.2.8.jar metrics-json/4.2.8//metrics-json-4.2.8.jar metrics-jvm/4.2.8//metrics-jvm-4.2.8.jar mimepull/1.9.15//mimepull-1.9.15.jar -netty-all/4.1.87.Final//netty-all-4.1.87.Final.jar -netty-buffer/4.1.87.Final//netty-buffer-4.1.87.Final.jar -netty-codec-dns/4.1.87.Final//netty-codec-dns-4.1.87.Final.jar -netty-codec-http/4.1.87.Final//netty-codec-http-4.1.87.Final.jar -netty-codec-http2/4.1.87.Final//netty-codec-http2-4.1.87.Final.jar -netty-codec-socks/4.1.87.Final//netty-codec-socks-4.1.87.Final.jar -netty-codec/4.1.87.Final//netty-codec-4.1.87.Final.jar -netty-common/4.1.87.Final//netty-common-4.1.87.Final.jar -netty-handler-proxy/4.1.87.Final//netty-handler-proxy-4.1.87.Final.jar -netty-handler/4.1.87.Final//netty-handler-4.1.87.Final.jar -netty-resolver-dns/4.1.87.Final//netty-resolver-dns-4.1.87.Final.jar -netty-resolver/4.1.87.Final//netty-resolver-4.1.87.Final.jar -netty-transport-classes-epoll/4.1.87.Final//netty-transport-classes-epoll-4.1.87.Final.jar -netty-transport-native-epoll/4.1.87.Final/linux-aarch_64/netty-transport-native-epoll-4.1.87.Final-linux-aarch_64.jar -netty-transport-native-epoll/4.1.87.Final/linux-x86_64/netty-transport-native-epoll-4.1.87.Final-linux-x86_64.jar -netty-transport-native-unix-common/4.1.87.Final//netty-transport-native-unix-common-4.1.87.Final.jar -netty-transport/4.1.87.Final//netty-transport-4.1.87.Final.jar +netty-all/4.1.93.Final//netty-all-4.1.93.Final.jar +netty-buffer/4.1.93.Final//netty-buffer-4.1.93.Final.jar +netty-codec-dns/4.1.93.Final//netty-codec-dns-4.1.93.Final.jar +netty-codec-http/4.1.93.Final//netty-codec-http-4.1.93.Final.jar +netty-codec-http2/4.1.93.Final//netty-codec-http2-4.1.93.Final.jar +netty-codec-socks/4.1.93.Final//netty-codec-socks-4.1.93.Final.jar +netty-codec/4.1.93.Final//netty-codec-4.1.93.Final.jar +netty-common/4.1.93.Final//netty-common-4.1.93.Final.jar +netty-handler-proxy/4.1.93.Final//netty-handler-proxy-4.1.93.Final.jar +netty-handler/4.1.93.Final//netty-handler-4.1.93.Final.jar +netty-resolver-dns/4.1.93.Final//netty-resolver-dns-4.1.93.Final.jar +netty-resolver/4.1.93.Final//netty-resolver-4.1.93.Final.jar +netty-transport-classes-epoll/4.1.93.Final//netty-transport-classes-epoll-4.1.93.Final.jar 
+netty-transport-native-epoll/4.1.93.Final/linux-aarch_64/netty-transport-native-epoll-4.1.93.Final-linux-aarch_64.jar +netty-transport-native-epoll/4.1.93.Final/linux-x86_64/netty-transport-native-epoll-4.1.93.Final-linux-x86_64.jar +netty-transport-native-unix-common/4.1.93.Final//netty-transport-native-unix-common-4.1.93.Final.jar +netty-transport/4.1.93.Final//netty-transport-4.1.93.Final.jar okhttp-urlconnection/3.14.9//okhttp-urlconnection-3.14.9.jar okhttp/3.12.12//okhttp-3.12.12.jar okio/1.15.0//okio-1.15.0.jar @@ -163,7 +171,7 @@ perfmark-api/0.25.0//perfmark-api-0.25.0.jar proto-google-common-protos/2.9.0//proto-google-common-protos-2.9.0.jar protobuf-java-util/3.21.7//protobuf-java-util-3.21.7.jar protobuf-java/3.21.7//protobuf-java-3.21.7.jar -scala-library/2.12.17//scala-library-2.12.17.jar +scala-library/2.12.18//scala-library-2.12.18.jar scopt_2.12/4.1.0//scopt_2.12-4.1.0.jar simpleclient/0.16.0//simpleclient-0.16.0.jar simpleclient_common/0.16.0//simpleclient_common-0.16.0.jar @@ -174,7 +182,10 @@ simpleclient_tracer_common/0.16.0//simpleclient_tracer_common-0.16.0.jar simpleclient_tracer_otel/0.16.0//simpleclient_tracer_otel-0.16.0.jar simpleclient_tracer_otel_agent/0.16.0//simpleclient_tracer_otel_agent-0.16.0.jar slf4j-api/1.7.36//slf4j-api-1.7.36.jar -snakeyaml/1.33//snakeyaml-1.33.jar +snakeyaml-engine/2.6//snakeyaml-engine-2.6.jar +snakeyaml/2.2//snakeyaml-2.2.jar +snappy-java/1.1.8.4//snappy-java-1.1.8.4.jar +sqlite-jdbc/3.42.0.0//sqlite-jdbc-3.42.0.0.jar swagger-annotations/2.2.1//swagger-annotations-2.2.1.jar swagger-core/2.2.1//swagger-core-2.2.1.jar swagger-integration/2.2.1//swagger-integration-2.2.1.jar @@ -186,4 +197,4 @@ units/1.6//units-1.6.jar vertx-core/4.3.2//vertx-core-4.3.2.jar vertx-grpc/4.3.2//vertx-grpc-4.3.2.jar zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar -zookeeper/3.4.14//zookeeper-3.4.14.jar +zstd-jni/1.5.2-1//zstd-jni-1.5.2-1.jar diff --git a/dev/gen/gen_all_config_docs.sh b/dev/gen/gen_all_config_docs.sh new file mode 100755 index 00000000000..2a5dca7f952 --- /dev/null +++ b/dev/gen/gen_all_config_docs.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# docs/deployment/settings.md + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean test \ + -pl kyuubi-server -am \ + -Pflink-provided,spark-provided,hive-provided \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.config.AllKyuubiConfiguration diff --git a/dev/gen/gen_hive_kdf_docs.sh b/dev/gen/gen_hive_kdf_docs.sh new file mode 100755 index 00000000000..b670dc3c531 --- /dev/null +++ b/dev/gen/gen_hive_kdf_docs.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# docs/extensions/engines/hive/functions.md + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean test \ + -pl externals/kyuubi-hive-sql-engine -am \ + -Pflink-provided,spark-provided,hive-provided \ + -DwildcardSuites=org.apache.kyuubi.engine.hive.udf.KyuubiDefinedFunctionSuite diff --git a/dev/gen/gen_ranger_policy_json.sh b/dev/gen/gen_ranger_policy_json.sh new file mode 100755 index 00000000000..1f4193d3e1f --- /dev/null +++ b/dev/gen/gen_ranger_policy_json.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean test \ + -pl extensions/spark/kyuubi-spark-authz \ + -Pgen-policy \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator diff --git a/dev/gen/gen_ranger_spec_json.sh b/dev/gen/gen_ranger_spec_json.sh new file mode 100755 index 00000000000..e00857f8f23 --- /dev/null +++ b/dev/gen/gen_ranger_spec_json.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Golden result file: +# extensions/spark/kyuubi-spark-authz/src/main/resources/*_spec.json + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean test \ + -pl extensions/spark/kyuubi-spark-authz \ + -Pgen-policy \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.JsonSpecFileGenerator diff --git a/dev/gen/gen_spark_kdf_docs.sh b/dev/gen/gen_spark_kdf_docs.sh new file mode 100755 index 00000000000..ac13082e31e --- /dev/null +++ b/dev/gen/gen_spark_kdf_docs.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# docs/extensions/engines/spark/functions.md + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean test \ + -pl externals/kyuubi-spark-sql-engine -am \ + -Pflink-provided,spark-provided,hive-provided \ + -DwildcardSuites=org.apache.kyuubi.engine.spark.udf.KyuubiDefinedFunctionSuite diff --git a/dev/gen/gen_tpcds_output_schema.sh b/dev/gen/gen_tpcds_output_schema.sh new file mode 100755 index 00000000000..49f8d77988a --- /dev/null +++ b/dev/gen/gen_tpcds_output_schema.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# extensions/spark/kyuubi-spark-authz/src/test/resources/*.output.schema + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean install \ + -pl kyuubi-server -am \ + -Dmaven.plugin.scalatest.exclude.tags="" \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.operation.tpcds.OutputSchemaTPCDSSuite diff --git a/dev/gen/gen_tpcds_queries.sh b/dev/gen/gen_tpcds_queries.sh new file mode 100755 index 00000000000..07f075b7a88 --- /dev/null +++ b/dev/gen/gen_tpcds_queries.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Golden result file: +# kyuubi-spark-connector-tpcds/src/main/resources/kyuubi/tpcds_*/*.sql + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean install \ + -pl extensions/spark/kyuubi-spark-connector-tpcds -am \ + -Dmaven.plugin.scalatest.exclude.tags="" \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.spark.connector.tpcds.TPCDSQuerySuite diff --git a/dev/gen/gen_tpch_queries.sh b/dev/gen/gen_tpch_queries.sh new file mode 100755 index 00000000000..d0c65256f01 --- /dev/null +++ b/dev/gen/gen_tpch_queries.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Golden result file: +# kyuubi-spark-connector-tpcds/src/main/resources/kyuubi/tpcdh_*/*.sql + +KYUUBI_UPDATE="${KYUUBI_UPDATE:-1}" \ +build/mvn clean install \ + -pl extensions/spark/kyuubi-spark-connector-tpch -am \ + -Dmaven.plugin.scalatest.exclude.tags="" \ + -Dtest=none \ + -DwildcardSuites=org.apache.kyuubi.spark.connector.tpch.TPCHQuerySuite diff --git a/dev/kyuubi-codecov/pom.xml b/dev/kyuubi-codecov/pom.xml index ba15ec0f823..31b9d27bc03 100644 --- a/dev/kyuubi-codecov/pom.xml +++ b/dev/kyuubi-codecov/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-codecov_2.12 + kyuubi-codecov_${scala.binary.version} pom Kyuubi Dev Code Coverage https://kyuubi.apache.org/ @@ -199,7 +199,17 @@ org.apache.kyuubi - kyuubi-spark-connector-kudu_${scala.binary.version} + kyuubi-spark-connector-hive_${scala.binary.version} + ${project.version} + + + + + spark-3.4 + + + org.apache.kyuubi + kyuubi-extension-spark-3-4_${scala.binary.version} ${project.version} diff --git a/dev/kyuubi-tpcds/pom.xml b/dev/kyuubi-tpcds/pom.xml index 1bc69f9f2ce..b80c1227fc2 100644 --- a/dev/kyuubi-tpcds/pom.xml +++ b/dev/kyuubi-tpcds/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-tpcds_2.12 + kyuubi-tpcds_${scala.binary.version} jar Kyuubi Dev TPCDS Generator https://kyuubi.apache.org/ diff --git a/dev/merge_kyuubi_pr.py b/dev/merge_kyuubi_pr.py index cb3696d1f98..fe889374867 100755 --- a/dev/merge_kyuubi_pr.py +++ b/dev/merge_kyuubi_pr.py @@ -30,9 +30,9 @@ import re import subprocess import sys -from urllib.request import urlopen -from urllib.request import Request from urllib.error import HTTPError +from urllib.request import Request +from urllib.request import urlopen KYUUBI_HOME = os.environ.get("KYUUBI_HOME", os.getcwd()) PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache") @@ -248,6 +248,8 @@ def main(): user_login = pr["user"]["login"] base_ref = pr["head"]["ref"] pr_repo_desc = "%s/%s" % (user_login, base_ref) + assignees = pr["assignees"] + milestone = pr["milestone"] # Merged pull requests don't appear as merged in the GitHub API; # Instead, they're closed by asfgit. @@ -276,6 +278,17 @@ def main(): print("\n=== Pull Request #%s ===" % pr_num) print("title:\t%s\nsource:\t%s\ntarget:\t%s\nurl:\t%s\nbody:\n\n%s" % (title, pr_repo_desc, target_ref, url, body)) + + if assignees is None or len(assignees)==0: + continue_maybe("Assignees have NOT been set. Continue?") + else: + print("assignees: %s" % [assignee["login"] for assignee in assignees]) + + if milestone is None: + continue_maybe("Milestone has NOT been set. Continue?") + else: + print("milestone: %s" % milestone["title"]) + continue_maybe("Proceed with merging pull request #%s?" % pr_num) merged_refs = [target_ref] diff --git a/dev/reformat b/dev/reformat index 7c6ef712485..6346e68f68d 100755 --- a/dev/reformat +++ b/dev/reformat @@ -20,7 +20,7 @@ set -x KYUUBI_HOME="$(cd "`dirname "$0"`/.."; pwd)" -PROFILES="-Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.3,spark-3.2,spark-3.1,tpcds" +PROFILES="-Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.4,spark-3.3,spark-3.2,spark-3.1,tpcds" # python style checks rely on `black` in path if ! 
command -v black &> /dev/null diff --git a/docker/kyuubi-configmap.yaml b/docker/kyuubi-configmap.yaml index 13835493b8f..6a6d430ce58 100644 --- a/docker/kyuubi-configmap.yaml +++ b/docker/kyuubi-configmap.yaml @@ -52,4 +52,4 @@ data: # kyuubi.frontend.bind.port 10009 # - # Details in https://kyuubi.apache.org/docs/latest/deployment/settings.html + # Details in https://kyuubi.readthedocs.io/en/master/configuration/settings.html diff --git a/docker/playground/.env b/docker/playground/.env index abd897192d9..24284bd39fa 100644 --- a/docker/playground/.env +++ b/docker/playground/.env @@ -15,16 +15,16 @@ # limitations under the License. # -AWS_JAVA_SDK_VERSION=1.12.239 -HADOOP_VERSION=3.3.1 +AWS_JAVA_SDK_VERSION=1.12.367 +HADOOP_VERSION=3.3.6 HIVE_VERSION=2.3.9 -ICEBERG_VERSION=1.1.0 -KYUUBI_VERSION=1.6.1-incubating -KYUUBI_HADOOP_VERSION=3.3.4 +ICEBERG_VERSION=1.3.1 +KYUUBI_VERSION=1.7.3 +KYUUBI_HADOOP_VERSION=3.3.5 POSTGRES_VERSION=12 POSTGRES_JDBC_VERSION=42.3.4 SCALA_BINARY_VERSION=2.12 -SPARK_VERSION=3.3.2 +SPARK_VERSION=3.3.3 SPARK_BINARY_VERSION=3.3 SPARK_HADOOP_VERSION=3.3.2 ZOOKEEPER_VERSION=3.6.3 diff --git a/docker/playground/compose.yml b/docker/playground/compose.yml index 069624ee2a9..362b3505be1 100644 --- a/docker/playground/compose.yml +++ b/docker/playground/compose.yml @@ -17,11 +17,11 @@ services: minio: - image: alekcander/bitnami-minio-multiarch:RELEASE.2022-05-26T05-48-41Z + image: bitnami/minio:2023-debian-11 environment: MINIO_ROOT_USER: minio MINIO_ROOT_PASSWORD: minio_minio - MINIO_DEFAULT_BUCKETS: spark-bucket,iceberg-bucket + MINIO_DEFAULT_BUCKETS: spark-bucket container_name: minio hostname: minio ports: @@ -68,6 +68,7 @@ services: ports: - 4040-4050:4040-4050 - 10009:10009 + - 10099:10099 volumes: - ./conf/core-site.xml:/etc/hadoop/conf/core-site.xml - ./conf/hive-site.xml:/etc/hive/conf/hive-site.xml diff --git a/docker/playground/conf/kyuubi-defaults.conf b/docker/playground/conf/kyuubi-defaults.conf index 4906c5de4c0..e4a674634d4 100644 --- a/docker/playground/conf/kyuubi-defaults.conf +++ b/docker/playground/conf/kyuubi-defaults.conf @@ -18,8 +18,10 @@ ## Kyuubi Configurations kyuubi.authentication=NONE -kyuubi.frontend.thrift.binary.bind.host=0.0.0.0 +kyuubi.frontend.bind.host=0.0.0.0 +kyuubi.frontend.protocols=THRIFT_BINARY,REST kyuubi.frontend.thrift.binary.bind.port=10009 +kyuubi.frontend.rest.bind.port=10099 kyuubi.ha.addresses=zookeeper:2181 kyuubi.session.engine.idle.timeout=PT5M kyuubi.operation.incremental.collect=true @@ -28,4 +30,4 @@ kyuubi.operation.progress.enabled=true kyuubi.engine.session.initialize.sql \ show namespaces in tpcds; \ show namespaces in tpch; \ - show namespaces in postgres; + show namespaces in postgres diff --git a/docker/playground/conf/spark-defaults.conf b/docker/playground/conf/spark-defaults.conf index 9d1d4a6028b..7983b5e705c 100644 --- a/docker/playground/conf/spark-defaults.conf +++ b/docker/playground/conf/spark-defaults.conf @@ -38,7 +38,3 @@ spark.sql.catalog.postgres.url=jdbc:postgresql://postgres:5432/metastore spark.sql.catalog.postgres.driver=org.postgresql.Driver spark.sql.catalog.postgres.user=postgres spark.sql.catalog.postgres.password=postgres - -spark.sql.catalog.iceberg=org.apache.iceberg.spark.SparkCatalog -spark.sql.catalog.iceberg.type=hadoop -spark.sql.catalog.iceberg.warehouse=s3a://iceberg-bucket/iceberg-warehouse diff --git a/docker/playground/image/kyuubi-playground-base.Dockerfile b/docker/playground/image/kyuubi-playground-base.Dockerfile index 6ee4ed40519..e8375eb68b8 100644 --- 
a/docker/playground/image/kyuubi-playground-base.Dockerfile +++ b/docker/playground/image/kyuubi-playground-base.Dockerfile @@ -20,4 +20,4 @@ RUN set -x && \ mkdir /opt/busybox && \ busybox --install /opt/busybox -ENV PATH=/opt/java/openjdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/busybox +ENV PATH=${PATH}:/opt/busybox diff --git a/docs/appendix/terminology.md b/docs/appendix/terminology.md index b81fa25fe87..b349d77c7bd 100644 --- a/docs/appendix/terminology.md +++ b/docs/appendix/terminology.md @@ -129,9 +129,9 @@ As an enterprise service, SLA commitment is essential. Deploying Kyuubi in High

-## DataLake & LakeHouse +## DataLake & Lakehouse -Kyuubi unifies DataLake & LakeHouse access in the simplest pure SQL way, meanwhile it's also the securest way with authentication and SQL standard authorization. +Kyuubi unifies DataLake & Lakehouse access in the simplest pure SQL way; meanwhile, it is also the most secure way, with authentication and SQL standard authorization. ### Apache Iceberg diff --git a/docs/client/advanced/kerberos.md b/docs/client/advanced/kerberos.md index 4962dd2c8b2..a9cb5581227 100644 --- a/docs/client/advanced/kerberos.md +++ b/docs/client/advanced/kerberos.md @@ -242,5 +242,5 @@ jdbc:hive2://:/;kyuubiServerPrinc - `principal` is inherited from Hive JDBC Driver and is a little ambiguous, and we could use `kyuubiServerPrincipal` as its alias. - `kyuubi_server_principal` is the value of `kyuubi.kinit.principal` set in `kyuubi-defaults.conf`. - As a command line argument, JDBC URL should be quoted to avoid being split into 2 commands by ";". -- As to DBeaver, `;principal=` should be set as the `Database/Schema` argument. +- As for DBeaver, `;principal=` or `;kyuubiServerPrincipal=` should be set as the `Database/Schema` argument. diff --git a/docs/client/jdbc/hive_jdbc.md b/docs/client/jdbc/hive_jdbc.md index 42d2f7b5a33..00498dfaa01 100644 --- a/docs/client/jdbc/hive_jdbc.md +++ b/docs/client/jdbc/hive_jdbc.md @@ -19,14 +19,18 @@ ## Instructions -Kyuubi does not provide its own JDBC Driver so far, -as it is fully compatible with Hive JDBC and ODBC drivers that let you connect to popular Business Intelligence (BI) tools to query, -analyze and visualize data though Spark SQL engines. +Kyuubi is fully compatible with Hive JDBC and ODBC drivers that let you connect to popular Business Intelligence (BI) +tools to query, analyze and visualize data through Spark SQL engines. + +It's recommended to use the [Kyuubi JDBC driver](./kyuubi_jdbc.html) for new applications. ## Install Hive JDBC For programing, the easiest way to get `hive-jdbc` is from [the maven central](https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc). For example, +The following sections demonstrate how to use Hive JDBC driver 2.3.8 to connect to the Kyuubi Server; in practice, any version +less than or equal to 3.1.x should work fine. + - **maven** ```xml @@ -76,7 +80,3 @@ jdbc:hive2://:/;?#<[spark|hive]Var jdbc:hive2://localhost:10009/default;hive.server2.proxy.user=proxy_user?kyuubi.engine.share.level=CONNECTION;spark.ui.enabled=false#var_x=y ``` -## Unsupported Hive Features - -- Connect to HiveServer2 using HTTP transport. ```transportMode=http``` - diff --git a/docs/client/jdbc/kyuubi_jdbc.rst b/docs/client/jdbc/kyuubi_jdbc.rst index fdc40d599eb..d4270ea8ac6 100644 --- a/docs/client/jdbc/kyuubi_jdbc.rst +++ b/docs/client/jdbc/kyuubi_jdbc.rst @@ -17,14 +17,14 @@ Kyuubi Hive JDBC Driver ======================= .. versionadded:: 1.4.0 - Since 1.4.0, kyuubi community maintains a forked hive jdbc driver module and provides both shaded and non-shaded packages. + The Kyuubi community maintains a forked Hive JDBC driver module and provides both shaded and non-shaded packages. -This packages aims to support some missing functionalities of the original hive jdbc. -For kyuubi engines that support multiple catalogs, it provides meta APIs for better support. -The behaviors of the original hive jdbc have remained. +This package aims to support some missing functionalities of the original Hive JDBC driver. +For Kyuubi engines that support multiple catalogs, it provides meta APIs for better support.
+The behaviors of the original Hive JDBC driver have remained. -To access a Hive data warehouse or new lakehouse formats, such as Apache Iceberg/Hudi, delta lake using the kyuubi jdbc driver for Apache kyuubi, you need to configure -the following: +To access a Hive data warehouse or new Lakehouse formats, such as Apache Iceberg/Hudi, Delta Lake using the Kyuubi JDBC driver +for Apache kyuubi, you need to configure the following: - The list of driver library files - :ref:`referencing-libraries`. - The Driver or DataSource class - :ref:`registering_class`. @@ -46,28 +46,28 @@ In the code, specify the artifact `kyuubi-hive-jdbc-shaded` from `Maven Central` Maven ^^^^^ -.. code-block:: xml +.. parsed-literal:: org.apache.kyuubi kyuubi-hive-jdbc-shaded - 1.5.2-incubating + \ |release|\ -Sbt +sbt ^^^ -.. code-block:: sbt +.. parsed-literal:: - libraryDependencies += "org.apache.kyuubi" % "kyuubi-hive-jdbc-shaded" % "1.5.2-incubating" + libraryDependencies += "org.apache.kyuubi" % "kyuubi-hive-jdbc-shaded" % "\ |release|\" Gradle ^^^^^^ -.. code-block:: gradle +.. parsed-literal:: - implementation group: 'org.apache.kyuubi', name: 'kyuubi-hive-jdbc-shaded', version: '1.5.2-incubating' + implementation group: 'org.apache.kyuubi', name: 'kyuubi-hive-jdbc-shaded', version: '\ |release|\' Using the Driver in a JDBC Application ************************************** @@ -92,11 +92,9 @@ connection for JDBC: .. code-block:: java - private static Connection connectViaDM() throws Exception - { - Connection connection = null; - connection = DriverManager.getConnection(CONNECTION_URL); - return connection; + private static Connection newKyuubiConnection() throws Exception { + Connection connection = DriverManager.getConnection(CONNECTION_URL); + return connection; } .. _building_url: @@ -112,12 +110,13 @@ accessing. The following is the format of the connection URL for the Kyuubi Hive .. code-block:: jdbc - jdbc:subprotocol://host:port/schema;<[#|?]sessionProperties> + jdbc:subprotocol://host:port[/catalog]/[schema];<[#|?]sessionProperties> - subprotocol: kyuubi or hive2 - host: DNS or IP address of the kyuubi server - port: The number of the TCP port that the server uses to listen for client requests -- dbName: Optional database name to set the current database to run the query against, use `default` if absent. +- catalog: Optional catalog name to set the current catalog to run the query against. +- schema: Optional database name to set the current database to run the query against, use `default` if absent. - clientProperties: Optional `semicolon(;)` separated `key=value` parameters identified and affect the client behavior locally. e.g., user=foo;password=bar. - sessionProperties: Optional `semicolon(;)` separated `key=value` parameters used to configure the session, operation or background engines. For instance, `kyuubi.engine.share.level=CONNECTION` determines the background engine instance is used only by the current connection. `spark.ui.enabled=false` disables the Spark UI of the engine. @@ -127,7 +126,7 @@ accessing. The following is the format of the connection URL for the Kyuubi Hive - Properties are case-sensitive - Do not duplicate properties in the connection URL -Connection URL over Http +Connection URL over HTTP ************************ .. 
versionadded:: 1.6.0 @@ -145,16 +144,78 @@ Connection URL over Service Discovery jdbc:subprotocol:///;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=kyuubi -- zookeeper quorum is the corresponding zookeeper cluster configured by `kyuubi.ha.zookeeper.quorum` at the server side. -- zooKeeperNamespace is the corresponding namespace configured by `kyuubi.ha.zookeeper.namespace` at the server side. +- zookeeper quorum is the corresponding zookeeper cluster configured by `kyuubi.ha.addresses` at the server side. +- zooKeeperNamespace is the corresponding namespace configured by `kyuubi.ha.namespace` at the server side. -Authentication -------------- +Kerberos Authentication +----------------------- +Since 1.6.0, the Kyuubi JDBC driver implements Kerberos authentication based on the JAAS framework instead of `Hadoop UserGroupInformation`_, +which means it does not forcibly rely on Hadoop dependencies to connect to a kerberized Kyuubi Server. +The Kyuubi JDBC driver supports several approaches to connect to a kerberized Kyuubi Server. First of all, please follow +the `krb5.conf instruction`_ to set up ``krb5.conf`` properly. -DataTypes --------- +Authentication by Principal and Keytab +************************************** + +.. versionadded:: 1.6.0 + +.. tip:: + + It's the simplest way, with minimal setup requirements, for Kerberos authentication. + +It's straightforward to use a principal and keytab for Kerberos authentication: simply configure them in the JDBC URL. + +.. code-block:: + + jdbc:kyuubi://host:port/schema;kyuubiClientPrincipal=<clientPrincipal>;kyuubiClientKeytab=<clientKeytab>;kyuubiServerPrincipal=<serverPrincipal> + +- kyuubiClientPrincipal: Kerberos ``principal`` for client authentication +- kyuubiClientKeytab: path of Kerberos ``keytab`` file for client authentication +- kyuubiServerPrincipal: Kerberos ``principal`` configured by `kyuubi.kinit.principal` at the server side. ``kyuubiServerPrincipal`` is available + as an alias of ``principal`` since 1.7.0; use ``principal`` for previous versions. + +Authentication by Principal and TGT Cache +***************************************** + +Another typical usage of Kerberos authentication is to run `kinit` to generate the TGT cache first; the application +then performs Kerberos authentication through the TGT cache. + +.. code-block:: + + jdbc:kyuubi://host:port/schema;kyuubiServerPrincipal=<serverPrincipal> + +Authentication by `Hadoop UserGroupInformation`_ ``doAs`` (programming only) +***************************************************************************** + +.. tip:: + + This approach allows a project which already uses `Hadoop UserGroupInformation`_ for Kerberos authentication to easily + connect to a kerberized Kyuubi Server. This approach does not work in [1.6.0, 1.7.0], and was fixed in 1.7.1. + +.. code-block:: + + String jdbcUrl = "jdbc:kyuubi://host:port/schema;kyuubiServerPrincipal=<serverPrincipal>"; + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, clientKeytab); + ugi.doAs((PrivilegedExceptionAction) () -> { + Connection conn = DriverManager.getConnection(jdbcUrl); + ... + }); + +Authentication by Subject (programming only) +********************************************* + +.. code-block:: java + + String jdbcUrl = "jdbc:kyuubi://host:port/schema;kyuubiServerPrincipal=<serverPrincipal>;kerberosAuthType=fromSubject"; + Subject kerberizedSubject = ...; + Subject.doAs(kerberizedSubject, (PrivilegedExceptionAction) () -> { + Connection conn = DriverManager.getConnection(jdbcUrl); + ... + }); .. _Maven Central: https://mvnrepository.com/artifact/org.apache.kyuubi/kyuubi-hive-jdbc-shaded ..
_JDBC Applications: ../bi_tools/index.html .. _java.sql.DriverManager: https://docs.oracle.com/javase/8/docs/api/java/sql/DriverManager.html +.. _Hadoop UserGroupInformation: https://hadoop.apache.org/docs/stable/api/org/apache/hadoop/security/UserGroupInformation.html +.. _krb5.conf instruction: https://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/KerberosReq.html \ No newline at end of file diff --git a/docs/client/python/index.rst b/docs/client/python/index.rst index 70d2bc9e3db..5e8ae4228ac 100644 --- a/docs/client/python/index.rst +++ b/docs/client/python/index.rst @@ -22,4 +22,4 @@ Python pyhive pyspark - + jaydebeapi diff --git a/docs/client/python/jaydebeapi.md b/docs/client/python/jaydebeapi.md new file mode 100644 index 00000000000..3d89fd72298 --- /dev/null +++ b/docs/client/python/jaydebeapi.md @@ -0,0 +1,87 @@ + + +# Python-JayDeBeApi + +The [JayDeBeApi](https://pypi.org/project/JayDeBeApi/) module allows you to connect from Python code to databases using Java JDBC. +It provides a Python DB-API v2.0 to that database. + +## Requirements + +To install Python-JayDeBeApi, you can use pip, the Python package manager. Open your command-line interface or terminal and run the following command: + +```shell +pip install jaydebeapi +``` + +If you want to install JayDeBeApi in Jython, you'll need to ensure that you have either pip or EasyInstall available for Jython. These tools are used to install Python packages, including JayDeBeApi. +Or you can get a copy of the source by cloning from the [JayDeBeApi GitHub project](https://github.com/baztian/jaydebeapi) and install it. + +```shell +python setup.py install +``` + +or if you are using Jython use + +```shell +jython setup.py install +``` + +## Preparation + +Using the Python-JayDeBeApi package to connect to Kyuubi, you need to install the library and configure the relevant JDBC driver. You can download JDBC driver from maven repository and specify its path in Python. Choose the matching driver `kyuubi-hive-jdbc-*.jar` package based on the Kyuubi server version. +The driver class name is `org.apache.kyuubi.jdbc.KyuubiHiveDriver`. + +| Package | Repo | +|--------------------|-----------------------------------------------------------------------------------------------------| +| kyuubi jdbc driver | [kyuubi-hive-jdbc-*.jar](https://repo1.maven.org/maven2/org/apache/kyuubi/kyuubi-hive-jdbc-shaded/) | + +## Usage + +Below is a simple example demonstrating how to use Python-JayDeBeApi to connect to Kyuubi database and execute a query: + +```python +import jaydebeapi + +# Set JDBC driver path and connection URL +driver = "org.apache.kyuubi.jdbc.KyuubiHiveDriver" +url = "jdbc:kyuubi://host:port/default" +jdbc_driver_path = ["/path/to/kyuubi-hive-jdbc-*.jar"] + +# Connect to the database using JayDeBeApi +conn = jaydebeapi.connect(driver, url, ["user", "password"], jdbc_driver_path) + +# Create a cursor object +cursor = conn.cursor() + +# Execute the SQL query +cursor.execute("SELECT * FROM example_table LIMIT 10") + +# Retrieve query results +result_set = cursor.fetchall() + +# Process the results +for row in result_set: + print(row) + +# Close the cursor and the connection +cursor.close() +conn.close() +``` + +Make sure to replace the placeholders (host, port, user, password) with your actual Kyuubi configuration. +With the above code, you can connect to Kyuubi and execute SQL queries in Python. Please handle exceptions and errors appropriately in real-world applications. 
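As a companion to the "Authentication by Principal and Keytab" section of `kyuubi_jdbc.rst` above, which documents only the URL format, here is a minimal Java sketch of a client that authenticates with a keytab through the Kyuubi Hive JDBC driver. The host, port, principals, keytab path, and query are hypothetical placeholders, and it assumes the shaded `kyuubi-hive-jdbc-shaded` jar is on the classpath.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class KyuubiKerberosKeytabExample {

    // Hypothetical values -- replace with your own Kyuubi server, principals and keytab path.
    private static final String CONNECTION_URL =
            "jdbc:kyuubi://kyuubi.example.com:10009/default"
                    + ";kyuubiClientPrincipal=client@EXAMPLE.COM"
                    + ";kyuubiClientKeytab=/etc/security/keytabs/client.keytab"
                    + ";kyuubiServerPrincipal=kyuubi/kyuubi.example.com@EXAMPLE.COM";

    public static void main(String[] args) throws Exception {
        // Explicit registration; recent shaded driver jars usually self-register
        // through the JDBC ServiceLoader mechanism, so this line may be redundant.
        Class.forName("org.apache.kyuubi.jdbc.KyuubiHiveDriver");

        try (Connection conn = DriverManager.getConnection(CONNECTION_URL);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) { // simple sanity query
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
        }
    }
}
```

Keeping the principal/keytab pair in the connection URL means the client needs no `kinit` run or Hadoop `UserGroupInformation` setup, which is why the documentation above describes it as the simplest approach with minimal setup requirements.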
diff --git a/docs/client/python/pyhive.md b/docs/client/python/pyhive.md index dbebf684fc0..b5e57ea2eae 100644 --- a/docs/client/python/pyhive.md +++ b/docs/client/python/pyhive.md @@ -64,7 +64,47 @@ If password is provided for connection, make sure the `auth` param set to either ```python # open connection -conn = hive.Connection(host=kyuubi_host,port=10009, -user='user', password='password', auth='CUSTOM') +conn = hive.Connection(host=kyuubi_host, port=10009, + username='user', password='password', auth='CUSTOM') +``` + +Use Kerberos to connect to Kyuubi. + +`kerberos_service_name` must be the name of the service that started the Kyuubi server, usually the prefix of the first slash of `kyuubi.kinit.principal`. + +Note that PyHive does not support passing in `principal`, it splices in part of `principal` with `kerberos_service_name` and `kyuubi_host`. + +```python +# open connection +conn = hive.Connection(host=kyuubi_host, port=10009, auth="KERBEROS", kerberos_service_name="kyuubi") +``` + +If you encounter the following errors, you need to install related packages. + +``` +thrift.transport.TTransport.TTransportException: Could not start SASL: b'Error in sasl_client_start (-4) SASL(-4): no mechanism available: No worthy mechs found' +``` + +```bash +yum install -y cyrus-sasl-plain cyrus-sasl-devel cyrus-sasl-gssapi cyrus-sasl-md5 +``` + +Note that PyHive does not support the connection method based on zookeeper HA, you can connect to zookeeper to get the service address via [Kazoo](https://pypi.org/project/kazoo/). + +Code reference [https://stackoverflow.com/a/73326589](https://stackoverflow.com/a/73326589) + +```python +from pyhive import hive +import random +from kazoo.client import KazooClient +zk = KazooClient(hosts='kyuubi1.xx.com:2181,kyuubi2.xx.com:2181,kyuubi3.xx.com:2181', read_only=True) +zk.start() +servers = [kyuubi_server.split(';')[0].split('=')[1].split(':') + for kyuubi_server + in zk.get_children(path='kyuubi')] +kyuubi_host, kyuubi_port = random.choice(servers) +zk.stop() +print(kyuubi_host, kyuubi_port) +conn = hive.Connection(host=kyuubi_host, port=kyuubi_port, auth="KERBEROS", kerberos_service_name="kyuubi") ``` diff --git a/docs/client/rest/rest_api.md b/docs/client/rest/rest_api.md index 59e2d8535d5..fc04857d020 100644 --- a/docs/client/rest/rest_api.md +++ b/docs/client/rest/rest_api.md @@ -89,13 +89,9 @@ Create a session #### Request Parameters -| Name | Description | Type | -|:----------------|:-----------------------------------------|:-------| -| protocolVersion | The protocol version of Hive CLI service | Int | -| user | The user name | String | -| password | The user password | String | -| ipAddr | The user client IP address | String | -| configs | The configuration of the session | Map | +| Name | Description | Type | +|:--------|:---------------------------------|:-----| +| configs | The configuration of the session | Map | #### Response Body @@ -114,11 +110,12 @@ Create an operation with EXECUTE_STATEMENT type #### Request Body -| Name | Description | Type | -|:-------------|:---------------------------------------------------------------|:--------| -| statement | The SQL statement that you execute | String | -| runAsync | The flag indicates whether the query runs synchronously or not | Boolean | -| queryTimeout | The interval of query time out | Long | +| Name | Description | Type | +|:-------------|:---------------------------------------------------------------|:---------------| +| statement | The SQL statement that you execute | String | +| runAsync 
| The flag indicates whether the query runs synchronously or not | Boolean | +| queryTimeout | The interval of query time out | Long | +| confOverlay | The conf to overlay only for current operation | Map of key=val | #### Response Body @@ -401,7 +398,7 @@ curl --location --request POST 'http://localhost:10099/api/v1/batches' \ The created [Batch](#batch) object. -### GET /batches/{batchId} +### GET /batches/${batchId} Returns the batch information. @@ -452,7 +449,13 @@ Refresh the Hadoop configurations of the Kyuubi server. ### POST /admin/refresh/user_defaults_conf -Refresh the [user defaults configs](../../deployment/settings.html#user-defaults) with key in format in the form of `___{username}___.{config key}` from default property file. +Refresh the [user defaults configs](../../configuration/settings.html#user-defaults) with key in format in the form of `___{username}___.{config key}` from default property file. + +### POST /admin/refresh/kubernetes_conf + +Refresh the kubernetes configs with key prefixed with `kyuubi.kubernetes` from default property file. + +It is helpful if you need to support multiple kubernetes contexts and namespaces, see [KYUUBI #4843](https://github.com/apache/kyuubi/issues/4843). ### DELETE /admin/engine diff --git a/docs/community/release.md b/docs/community/release.md index 163c575ffad..f2c8541b1e1 100644 --- a/docs/community/release.md +++ b/docs/community/release.md @@ -191,6 +191,7 @@ The tag pattern is `v${RELEASE_VERSION}-rc${RELEASE_RC_NO}`, e.g. `v1.7.0-rc0` ```shell # Bump to the release version build/mvn versions:set -DgenerateBackupPoms=false -DnewVersion="${RELEASE_VERSION}" +(cd kyuubi-server/web-ui && npm version "${RELEASE_VERSION}") git commit -am "[RELEASE] Bump ${RELEASE_VERSION}" # Create tag @@ -198,6 +199,7 @@ git tag v${RELEASE_VERSION}-rc${RELEASE_RC_NO} # Prepare for the next development version build/mvn versions:set -DgenerateBackupPoms=false -DnewVersion="${NEXT_VERSION}-SNAPSHOT" +(cd kyuubi-server/web-ui && npm version "${NEXT_VERSION}-SNAPSHOT") git commit -am "[RELEASE] Bump ${NEXT_VERSION}-SNAPSHOT" # Push branch to apache remote repo @@ -275,8 +277,7 @@ Fork and clone [Apache Kyuubi website](https://github.com/apache/kyuubi-website) 1. Add a new markdown file in `src/zh/news/`, `src/en/news/` 2. Add a new markdown file in `src/zh/release/`, `src/en/release/` -3. Follow [Build Document](../develop_tools/build_document.md) to build documents, then copy `apache/kyuubi`'s - folder `docs/_build/html` to `apache/kyuubi-website`'s folder `content/docs/r{RELEASE_VERSION}` +3. Update `releases` defined in `hugo.toml`'s `[params]` part. ### Create an Announcement @@ -300,6 +301,9 @@ svn delete https://dist.apache.org/repos/dist/dev/kyuubi/{RELEASE_TAG} \ --message "Remove deprecated Apache Kyuubi ${RELEASE_TAG}" ``` -## Publish docker image +## Keep other artifacts up-to-date + +- Docker Image: https://github.com/apache/kyuubi-docker/blob/master/release/release_guide.md +- Helm Charts: https://github.com/apache/kyuubi/blob/master/charts/kyuubi/Chart.yaml +- Playground: https://github.com/apache/kyuubi/blob/master/docker/playground/.env -See steps in `https://github.com/apache/kyuubi-docker/blob/master/release/release_guide.md` diff --git a/docs/conf.py b/docs/conf.py index 3df98c6e34c..eaac1acedef 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -64,7 +64,7 @@ author = 'Apache Kyuubi Community' # The full version, including alpha/beta/rc tags -release = subprocess.getoutput("cd .. 
&& build/mvn help:evaluate -Dexpression=project.version|grep -v Using|grep -v INFO|grep -v WARNING|tail -n 1").split('\n')[-1] +release = subprocess.getoutput("grep 'kyuubi-parent' -C1 ../pom.xml | grep '' | awk -F '[<>]' '{print $3}'") # -- General configuration --------------------------------------------------- @@ -77,9 +77,11 @@ 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'recommonmark', + 'sphinx_copybutton', 'sphinx_markdown_tables', 'sphinx_togglebutton', 'notfound.extension', + 'sphinxemoji.sphinxemoji', ] master_doc = 'index' diff --git a/docs/deployment/settings.md b/docs/configuration/settings.md similarity index 72% rename from docs/deployment/settings.md rename to docs/configuration/settings.md index cd8f5b770ae..832099764c2 100644 --- a/docs/deployment/settings.md +++ b/docs/configuration/settings.md @@ -16,7 +16,7 @@ --> -# Introduction to the Kyuubi Configurations System +# Configurations Kyuubi provides several ways to configure the system and corresponding engines. @@ -33,7 +33,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | Key | Default | Meaning | Type | Since | |-----------------------------------------------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-------| -| kyuubi.authentication | NONE | A comma-separated list of client authentication types.
  • NOSASL: raw transport.
  • NONE: no authentication check.
  • KERBEROS: Kerberos/GSSAPI authentication.
  • CUSTOM: User-defined authentication.
  • JDBC: JDBC query authentication.
  • LDAP: Lightweight Directory Access Protocol authentication.
The following tree describes the catalog of each option.
  • NOSASL
  • SASL
    • SASL/PLAIN
      • NONE
      • LDAP
      • JDBC
      • CUSTOM
    • SASL/GSSAPI
      • KERBEROS
Note that: for SASL authentication, KERBEROS and PLAIN auth types are supported at the same time, and only the first specified PLAIN auth type is valid. | seq | 1.0.0 | +| kyuubi.authentication | NONE | A comma-separated list of client authentication types.
  • NOSASL: raw transport.
  • NONE: no authentication check.
  • KERBEROS: Kerberos/GSSAPI authentication.
  • CUSTOM: User-defined authentication.
  • JDBC: JDBC query authentication.
  • LDAP: Lightweight Directory Access Protocol authentication.
The following tree describes the catalog of each option.
  • NOSASL
  • SASL
    • SASL/PLAIN
      • NONE
      • LDAP
      • JDBC
      • CUSTOM
    • SASL/GSSAPI
      • KERBEROS
Note that: for SASL authentication, KERBEROS and PLAIN auth types are supported at the same time, and only the first specified PLAIN auth type is valid. | set | 1.0.0 | | kyuubi.authentication.custom.class | <undefined> | User-defined authentication implementation of org.apache.kyuubi.service.authentication.PasswdAuthenticationProvider | string | 1.3.0 | | kyuubi.authentication.jdbc.driver.class | <undefined> | Driver class name for JDBC Authentication Provider. | string | 1.6.0 | | kyuubi.authentication.jdbc.password | <undefined> | Database password for JDBC Authentication Provider. | string | 1.6.0 | @@ -47,29 +47,31 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.authentication.ldap.domain | <undefined> | LDAP domain. | string | 1.0.0 | | kyuubi.authentication.ldap.groupClassKey | groupOfNames | LDAP attribute name on the group entry that is to be used in LDAP group searches. For example: group, groupOfNames or groupOfUniqueNames. | string | 1.7.0 | | kyuubi.authentication.ldap.groupDNPattern | <undefined> | COLON-separated list of patterns to use to find DNs for group entities in this directory. Use %s where the actual group name is to be substituted for. For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com. | string | 1.7.0 | -| kyuubi.authentication.ldap.groupFilter || COMMA-separated list of LDAP Group names (short name not full DNs). For example: HiveAdmins,HadoopAdmins,Administrators | seq | 1.7.0 | +| kyuubi.authentication.ldap.groupFilter || COMMA-separated list of LDAP Group names (short name not full DNs). For example: HiveAdmins,HadoopAdmins,Administrators | set | 1.7.0 | | kyuubi.authentication.ldap.groupMembershipKey | member | LDAP attribute name on the group object that contains the list of distinguished names for the user, group, and contact objects that are members of the group. For example: member, uniqueMember or memberUid | string | 1.7.0 | | kyuubi.authentication.ldap.guidKey | uid | LDAP attribute name whose values are unique in this LDAP server. For example: uid or CN. | string | 1.2.0 | | kyuubi.authentication.ldap.url | <undefined> | SPACE character separated LDAP connection URL(s). | string | 1.0.0 | | kyuubi.authentication.ldap.userDNPattern | <undefined> | COLON-separated list of patterns to use to find DNs for users in this directory. Use %s where the actual group name is to be substituted for. For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com. | string | 1.7.0 | -| kyuubi.authentication.ldap.userFilter || COMMA-separated list of LDAP usernames (just short names, not full DNs). For example: hiveuser,impalauser,hiveadmin,hadoopadmin | seq | 1.7.0 | +| kyuubi.authentication.ldap.userFilter || COMMA-separated list of LDAP usernames (just short names, not full DNs). For example: hiveuser,impalauser,hiveadmin,hadoopadmin | set | 1.7.0 | | kyuubi.authentication.ldap.userMembershipKey | <undefined> | LDAP attribute name on the user object that contains groups of which the user is a direct member, except for the primary group, which is represented by the primaryGroupId. For example: memberOf | string | 1.7.0 | | kyuubi.authentication.sasl.qop | auth | Sasl QOP enable higher levels of protection for Kyuubi communication with clients.
  • auth - authentication only (default)
  • auth-int - authentication plus integrity protection
  • auth-conf - authentication plus integrity and confidentiality protection. This is applicable only if Kyuubi is configured to use Kerberos authentication.
| string | 1.0.0 | ### Backend -| Key | Default | Meaning | Type | Since | -|--------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.backend.engine.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in SQL engine applications | duration | 1.0.0 | -| kyuubi.backend.engine.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in SQL engine applications | duration | 1.0.0 | -| kyuubi.backend.engine.exec.pool.size | 100 | Number of threads in the operation execution thread pool of SQL engine applications | int | 1.0.0 | -| kyuubi.backend.engine.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool in SQL engine applications | int | 1.0.0 | -| kyuubi.backend.server.event.json.log.path | file:///tmp/kyuubi/events | The location of server events go for the built-in JSON logger | string | 1.4.0 | -| kyuubi.backend.server.event.loggers || A comma-separated list of server history loggers, where session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.backend.server.event.json.log.path
  • JDBC: to be done
  • CUSTOM: User-defined event handlers.
Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor. | seq | 1.4.0 | -| kyuubi.backend.server.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in Kyuubi server | duration | 1.0.0 | -| kyuubi.backend.server.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in Kyuubi server | duration | 1.0.0 | -| kyuubi.backend.server.exec.pool.size | 100 | Number of threads in the operation execution thread pool of Kyuubi server | int | 1.0.0 | -| kyuubi.backend.server.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool of Kyuubi server | int | 1.0.0 | +| Key | Default | Meaning | Type | Since | +|--------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.backend.engine.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in SQL engine applications | duration | 1.0.0 | +| kyuubi.backend.engine.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in SQL engine applications | duration | 1.0.0 | +| kyuubi.backend.engine.exec.pool.size | 100 | Number of threads in the operation execution thread pool of SQL engine applications | int | 1.0.0 | +| kyuubi.backend.engine.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool in SQL engine applications | int | 1.0.0 | +| kyuubi.backend.server.event.json.log.path | file:///tmp/kyuubi/events | The location of server events go for the built-in JSON logger | string | 1.4.0 | +| kyuubi.backend.server.event.kafka.close.timeout | PT5S | Period to wait for Kafka producer of server event handlers to close. | duration | 1.8.0 | +| kyuubi.backend.server.event.kafka.topic | <undefined> | The topic of server events go for the built-in Kafka logger | string | 1.8.0 | +| kyuubi.backend.server.event.loggers || A comma-separated list of server history loggers, where session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.backend.server.event.json.log.path
  • KAFKA: the events will be serialized in JSON format and sent to the topic of `kyuubi.backend.server.event.kafka.topic`. Note: for the configs of the Kafka producer, please specify them with the prefix: `kyuubi.backend.server.event.kafka.`. For example, `kyuubi.backend.server.event.kafka.bootstrap.servers=127.0.0.1:9092`
  • JDBC: to be done
  • CUSTOM: User-defined event handlers.
Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor. | seq | 1.4.0 | +| kyuubi.backend.server.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in Kyuubi server | duration | 1.0.0 | +| kyuubi.backend.server.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in Kyuubi server | duration | 1.0.0 | +| kyuubi.backend.server.exec.pool.size | 100 | Number of threads in the operation execution thread pool of Kyuubi server | int | 1.0.0 | +| kyuubi.backend.server.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool of Kyuubi server | int | 1.0.0 | ### Batch @@ -77,7 +79,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co |---------------------------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| | kyuubi.batch.application.check.interval | PT5S | The interval to check batch job application information. | duration | 1.6.0 | | kyuubi.batch.application.starvation.timeout | PT3M | Threshold above which to warn batch application may be starved. | duration | 1.7.0 | -| kyuubi.batch.conf.ignore.list || A comma-separated list of ignored keys for batch conf. If the batch conf contains any of them, the key and the corresponding value will be removed silently during batch job submission. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering. You can also pre-define some config for batch job submission with the prefix: kyuubi.batchConf.[batchType]. For example, you can pre-define `spark.master` for the Spark batch job with key `kyuubi.batchConf.spark.spark.master`. | seq | 1.6.0 | +| kyuubi.batch.conf.ignore.list || A comma-separated list of ignored keys for batch conf. If the batch conf contains any of them, the key and the corresponding value will be removed silently during batch job submission. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering. You can also pre-define some config for batch job submission with the prefix: kyuubi.batchConf.[batchType]. For example, you can pre-define `spark.master` for the Spark batch job with key `kyuubi.batchConf.spark.spark.master`. 
| set | 1.6.0 | | kyuubi.batch.session.idle.timeout | PT6H | Batch session idle timeout, it will be closed when it's not accessed for this duration | duration | 1.6.2 | ### Credentials @@ -118,59 +120,82 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co ### Engine -| Key | Default | Meaning | Type | Since | -|----------------------------------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.engine.connection.url.use.hostname | true | (deprecated) When true, the engine registers with hostname to zookeeper. When Spark runs on K8s with cluster mode, set to false to ensure that server can connect to engine | boolean | 1.3.0 | -| kyuubi.engine.deregister.exception.classes || A comma-separated list of exception classes. If there is any exception thrown, whose class matches the specified classes, the engine would deregister itself. | seq | 1.2.0 | -| kyuubi.engine.deregister.exception.messages || A comma-separated list of exception messages. If there is any exception thrown, whose message or stacktrace matches the specified message list, the engine would deregister itself. | seq | 1.2.0 | -| kyuubi.engine.deregister.exception.ttl | PT30M | Time to live(TTL) for exceptions pattern specified in kyuubi.engine.deregister.exception.classes and kyuubi.engine.deregister.exception.messages to deregister engines. Once the total error count hits the kyuubi.engine.deregister.job.max.failures within the TTL, an engine will deregister itself and wait for self-terminated. Otherwise, we suppose that the engine has recovered from temporary failures. | duration | 1.2.0 | -| kyuubi.engine.deregister.job.max.failures | 4 | Number of failures of job before deregistering the engine. | int | 1.2.0 | -| kyuubi.engine.event.json.log.path | file:///tmp/kyuubi/events | The location where all the engine events go for the built-in JSON logger.
  • Local Path: start with 'file://'
  • HDFS Path: start with 'hdfs://'
| string | 1.3.0 | -| kyuubi.engine.event.loggers | SPARK | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • SPARK: the events will be written to the Spark listener bus.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: User-defined event handlers.
Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a subclass of `org.apache.kyuubi.events.handler.CustomEventHandlerProvider` which has a zero-arg constructor. | seq | 1.3.0 | -| kyuubi.engine.flink.extra.classpath | <undefined> | The extra classpath for the Flink SQL engine, for configuring the location of hadoop client jars, etc | string | 1.6.0 | -| kyuubi.engine.flink.java.options | <undefined> | The extra Java options for the Flink SQL engine | string | 1.6.0 | -| kyuubi.engine.flink.memory | 1g | The heap memory for the Flink SQL engine | string | 1.6.0 | -| kyuubi.engine.hive.event.loggers | JSON | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | -| kyuubi.engine.hive.extra.classpath | <undefined> | The extra classpath for the Hive query engine, for configuring location of the hadoop client jars and etc. | string | 1.6.0 | -| kyuubi.engine.hive.java.options | <undefined> | The extra Java options for the Hive query engine | string | 1.6.0 | -| kyuubi.engine.hive.memory | 1g | The heap memory for the Hive query engine | string | 1.6.0 | -| kyuubi.engine.initialize.sql | SHOW DATABASES | SemiColon-separated list of SQL statements to be initialized in the newly created engine before queries. i.e. use `SHOW DATABASES` to eagerly active HiveClient. This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver. | seq | 1.2.0 | -| kyuubi.engine.jdbc.connection.password | <undefined> | The password is used for connecting to server | string | 1.6.0 | -| kyuubi.engine.jdbc.connection.properties || The additional properties are used for connecting to server | seq | 1.6.0 | -| kyuubi.engine.jdbc.connection.provider | <undefined> | The connection provider is used for getting a connection from the server | string | 1.6.0 | -| kyuubi.engine.jdbc.connection.url | <undefined> | The server url that engine will connect to | string | 1.6.0 | -| kyuubi.engine.jdbc.connection.user | <undefined> | The user is used for connecting to server | string | 1.6.0 | -| kyuubi.engine.jdbc.driver.class | <undefined> | The driver class for JDBC engine connection | string | 1.6.0 | -| kyuubi.engine.jdbc.extra.classpath | <undefined> | The extra classpath for the JDBC query engine, for configuring the location of the JDBC driver and etc. | string | 1.6.0 | -| kyuubi.engine.jdbc.java.options | <undefined> | The extra Java options for the JDBC query engine | string | 1.6.0 | -| kyuubi.engine.jdbc.memory | 1g | The heap memory for the JDBC query engine | string | 1.6.0 | -| kyuubi.engine.jdbc.type | <undefined> | The short name of JDBC type | string | 1.6.0 | -| kyuubi.engine.operation.convert.catalog.database.enabled | true | When set to true, The engine converts the JDBC methods of set/get Catalog and set/get Schema to the implementation of different engines | boolean | 1.6.0 | -| kyuubi.engine.operation.log.dir.root | engine_operation_logs | Root directory for query operation log at engine-side. | string | 1.4.0 | -| kyuubi.engine.pool.name | engine-pool | The name of the engine pool. | string | 1.5.0 | -| kyuubi.engine.pool.selectPolicy | RANDOM | The select policy of an engine from the corresponding engine pool engine for a session.
  • RANDOM - Randomly use the engine in the pool
  • POLLING - Polling use the engine in the pool
| string | 1.7.0 | -| kyuubi.engine.pool.size | -1 | The size of the engine pool. Note that, if the size is less than 1, the engine pool will not be enabled; otherwise, the size of the engine pool will be min(this, kyuubi.engine.pool.size.threshold). | int | 1.4.0 | -| kyuubi.engine.pool.size.threshold | 9 | This parameter is introduced as a server-side parameter controlling the upper limit of the engine pool. | int | 1.4.0 | -| kyuubi.engine.session.initialize.sql || SemiColon-separated list of SQL statements to be initialized in the newly created engine session before queries. This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver. | seq | 1.3.0 | -| kyuubi.engine.share.level | USER | Engines will be shared in different levels, available configs are:
  • CONNECTION: engine will not be shared but only used by the current client connection
  • USER: engine will be shared by all sessions created by a unique username, see also kyuubi.engine.share.level.subdomain
  • GROUP: the engine will be shared by all sessions created by all users belong to the same primary group name. The engine will be launched by the group name as the effective username, so here the group name is in value of special user who is able to visit the computing resources/data of the team. It follows the [Hadoop GroupsMapping](https://reurl.cc/xE61Y5) to map user to a primary group. If the primary group is not found, it fallback to the USER level.
  • SERVER: the App will be shared by Kyuubi servers
| string | 1.2.0 | -| kyuubi.engine.share.level.sub.domain | <undefined> | (deprecated) - Using kyuubi.engine.share.level.subdomain instead | string | 1.2.0 | -| kyuubi.engine.share.level.subdomain | <undefined> | Allow end-users to create a subdomain for the share level of an engine. A subdomain is a case-insensitive string values that must be a valid zookeeper subpath. For example, for the `USER` share level, an end-user can share a certain engine within a subdomain, not for all of its clients. End-users are free to create multiple engines in the `USER` share level. When disable engine pool, use 'default' if absent. | string | 1.4.0 | -| kyuubi.engine.single.spark.session | false | When set to true, this engine is running in a single session mode. All the JDBC/ODBC connections share the temporary views, function registries, SQL configuration and the current database. | boolean | 1.3.0 | -| kyuubi.engine.spark.event.loggers | SPARK | A comma-separated list of engine loggers, where engine/session/operation etc events go.
  • SPARK: the events will be written to the Spark listener bus.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | -| kyuubi.engine.spark.python.env.archive | <undefined> | Portable Python env archive used for Spark engine Python language mode. | string | 1.7.0 | -| kyuubi.engine.spark.python.env.archive.exec.path | bin/python | The Python exec path under the Python env archive. | string | 1.7.0 | -| kyuubi.engine.spark.python.home.archive | <undefined> | Spark archive containing $SPARK_HOME/python directory, which is used to init session Python worker for Python language mode. | string | 1.7.0 | -| kyuubi.engine.trino.event.loggers | JSON | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | -| kyuubi.engine.trino.extra.classpath | <undefined> | The extra classpath for the Trino query engine, for configuring other libs which may need by the Trino engine | string | 1.6.0 | -| kyuubi.engine.trino.java.options | <undefined> | The extra Java options for the Trino query engine | string | 1.6.0 | -| kyuubi.engine.trino.memory | 1g | The heap memory for the Trino query engine | string | 1.6.0 | -| kyuubi.engine.type | SPARK_SQL | Specify the detailed engine supported by Kyuubi. The engine type bindings to SESSION scope. This configuration is experimental. Currently, available configs are:
  • SPARK_SQL: specify this engine type will launch a Spark engine which can provide all the capacity of the Apache Spark. Note, it's a default engine type.
  • FLINK_SQL: specify this engine type will launch a Flink engine which can provide all the capacity of the Apache Flink.
  • TRINO: specify this engine type will launch a Trino engine which can provide all the capacity of the Trino.
  • HIVE_SQL: specify this engine type will launch a Hive engine which can provide all the capacity of the Hive Server2.
  • JDBC: specify this engine type will launch a JDBC engine which can provide a MySQL protocol connector, for now we only support Doris dialect.
| string | 1.4.0 | -| kyuubi.engine.ui.retainedSessions | 200 | The number of SQL client sessions kept in the Kyuubi Query Engine web UI. | int | 1.4.0 | -| kyuubi.engine.ui.retainedStatements | 200 | The number of statements kept in the Kyuubi Query Engine web UI. | int | 1.4.0 | -| kyuubi.engine.ui.stop.enabled | true | When true, allows Kyuubi engine to be killed from the Spark Web UI. | boolean | 1.3.0 | -| kyuubi.engine.user.isolated.spark.session | true | When set to false, if the engine is running in a group or server share level, all the JDBC/ODBC connections will be isolated against the user. Including the temporary views, function registries, SQL configuration, and the current database. Note that, it does not affect if the share level is connection or user. | boolean | 1.6.0 | -| kyuubi.engine.user.isolated.spark.session.idle.interval | PT1M | The interval to check if the user-isolated Spark session is timeout. | duration | 1.6.0 | -| kyuubi.engine.user.isolated.spark.session.idle.timeout | PT6H | If kyuubi.engine.user.isolated.spark.session is false, we will release the Spark session if its corresponding user is inactive after this configured timeout. | duration | 1.6.0 | +| Key | Default | Meaning | Type | Since | +|----------------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.engine.chat.extra.classpath | <undefined> | The extra classpath for the Chat engine, for configuring the location of the SDK and etc. | string | 1.8.0 | +| kyuubi.engine.chat.gpt.apiKey | <undefined> | The key to access OpenAI open API, which could be got at https://platform.openai.com/account/api-keys | string | 1.8.0 | +| kyuubi.engine.chat.gpt.http.connect.timeout | PT2M | The timeout[ms] for establishing the connection with the Chat GPT server. A timeout value of zero is interpreted as an infinite timeout. | duration | 1.8.0 | +| kyuubi.engine.chat.gpt.http.proxy | <undefined> | HTTP proxy url for API calling in Chat GPT engine. e.g. http://127.0.0.1:1087 | string | 1.8.0 | +| kyuubi.engine.chat.gpt.http.socket.timeout | PT2M | The timeout[ms] for waiting for data packets after Chat GPT server connection is established. A timeout value of zero is interpreted as an infinite timeout. | duration | 1.8.0 | +| kyuubi.engine.chat.gpt.model | gpt-3.5-turbo | ID of the model used in ChatGPT. Available models refer to OpenAI's [Model overview](https://platform.openai.com/docs/models/overview). 
| string | 1.8.0 | +| kyuubi.engine.chat.java.options | <undefined> | The extra Java options for the Chat engine | string | 1.8.0 | +| kyuubi.engine.chat.memory | 1g | The heap memory for the Chat engine | string | 1.8.0 | +| kyuubi.engine.chat.provider | ECHO | The provider for the Chat engine. Candidates:
  • ECHO: simply replies with a welcome message.
  • GPT: a.k.a. ChatGPT, powered by OpenAI.
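For illustration only, a minimal `kyuubi-defaults.conf` sketch that switches the Chat engine to the GPT provider; the API key is a placeholder and the proxy line is optional:

```properties
# Use the ChatGPT-backed provider instead of the default ECHO provider
kyuubi.engine.chat.provider=GPT
# Placeholder key; obtain a real one from https://platform.openai.com/account/api-keys
kyuubi.engine.chat.gpt.apiKey=sk-your-api-key
# Optional: route API calls through a local HTTP proxy
kyuubi.engine.chat.gpt.http.proxy=http://127.0.0.1:1087
```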
| string | 1.8.0 | +| kyuubi.engine.connection.url.use.hostname | true | (deprecated) When true, the engine registers with hostname to zookeeper. When Spark runs on K8s with cluster mode, set to false to ensure that server can connect to engine | boolean | 1.3.0 | +| kyuubi.engine.deregister.exception.classes || A comma-separated list of exception classes. If there is any exception thrown, whose class matches the specified classes, the engine would deregister itself. | set | 1.2.0 | +| kyuubi.engine.deregister.exception.messages || A comma-separated list of exception messages. If there is any exception thrown, whose message or stacktrace matches the specified message list, the engine would deregister itself. | set | 1.2.0 | +| kyuubi.engine.deregister.exception.ttl | PT30M | Time to live(TTL) for exceptions pattern specified in kyuubi.engine.deregister.exception.classes and kyuubi.engine.deregister.exception.messages to deregister engines. Once the total error count hits the kyuubi.engine.deregister.job.max.failures within the TTL, an engine will deregister itself and wait for self-terminated. Otherwise, we suppose that the engine has recovered from temporary failures. | duration | 1.2.0 | +| kyuubi.engine.deregister.job.max.failures | 4 | Number of failures of job before deregistering the engine. | int | 1.2.0 | +| kyuubi.engine.event.json.log.path | file:///tmp/kyuubi/events | The location where all the engine events go for the built-in JSON logger.
  • Local Path: starts with 'file://'
  • HDFS Path: starts with 'hdfs://'
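As an illustrative sketch (the path is a placeholder), the JSON logger pairs with `kyuubi.engine.event.loggers` in `kyuubi-defaults.conf` like so:

```properties
# Keep the Spark listener bus logger and also write events as JSON files
kyuubi.engine.event.loggers=SPARK,JSON
# Placeholder HDFS location; a local path would use the file:// scheme instead
kyuubi.engine.event.json.log.path=hdfs:///tmp/kyuubi/events
```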
| string | 1.3.0 | +| kyuubi.engine.event.loggers | SPARK | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • SPARK: the events will be written to the Spark listener bus.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: User-defined event handlers.
Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a subclass of `org.apache.kyuubi.events.handler.CustomEventHandlerProvider` which has a zero-arg constructor. | seq | 1.3.0 | +| kyuubi.engine.flink.application.jars | <undefined> | A comma-separated list of the local jars to be shipped with the job to the cluster. For example, SQL UDF jars. Only effective in yarn application mode. | string | 1.8.0 | +| kyuubi.engine.flink.extra.classpath | <undefined> | The extra classpath for the Flink SQL engine, for configuring the location of hadoop client jars, etc. Only effective in yarn session mode. | string | 1.6.0 | +| kyuubi.engine.flink.java.options | <undefined> | The extra Java options for the Flink SQL engine. Only effective in yarn session mode. | string | 1.6.0 | +| kyuubi.engine.flink.memory | 1g | The heap memory for the Flink SQL engine. Only effective in yarn session mode. | string | 1.6.0 | +| kyuubi.engine.hive.event.loggers | JSON | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | +| kyuubi.engine.hive.extra.classpath | <undefined> | The extra classpath for the Hive query engine, for configuring location of the hadoop client jars and etc. | string | 1.6.0 | +| kyuubi.engine.hive.java.options | <undefined> | The extra Java options for the Hive query engine | string | 1.6.0 | +| kyuubi.engine.hive.memory | 1g | The heap memory for the Hive query engine | string | 1.6.0 | +| kyuubi.engine.initialize.sql | SHOW DATABASES | SemiColon-separated list of SQL statements to be initialized in the newly created engine before queries. i.e. use `SHOW DATABASES` to eagerly active HiveClient. This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver. | seq | 1.2.0 | +| kyuubi.engine.jdbc.connection.password | <undefined> | The password is used for connecting to server | string | 1.6.0 | +| kyuubi.engine.jdbc.connection.propagateCredential | false | Whether to use the session's user and password to connect to database | boolean | 1.8.0 | +| kyuubi.engine.jdbc.connection.properties || The additional properties are used for connecting to server | seq | 1.6.0 | +| kyuubi.engine.jdbc.connection.provider | <undefined> | The connection provider is used for getting a connection from the server | string | 1.6.0 | +| kyuubi.engine.jdbc.connection.url | <undefined> | The server url that engine will connect to | string | 1.6.0 | +| kyuubi.engine.jdbc.connection.user | <undefined> | The user is used for connecting to server | string | 1.6.0 | +| kyuubi.engine.jdbc.driver.class | <undefined> | The driver class for JDBC engine connection | string | 1.6.0 | +| kyuubi.engine.jdbc.extra.classpath | <undefined> | The extra classpath for the JDBC query engine, for configuring the location of the JDBC driver and etc. | string | 1.6.0 | +| kyuubi.engine.jdbc.initialize.sql | SELECT 1 | SemiColon-separated list of SQL statements to be initialized in the newly created engine before queries. i.e. use `SELECT 1` to eagerly active JDBCClient. | seq | 1.8.0 | +| kyuubi.engine.jdbc.java.options | <undefined> | The extra Java options for the JDBC query engine | string | 1.6.0 | +| kyuubi.engine.jdbc.memory | 1g | The heap memory for the JDBC query engine | string | 1.6.0 | +| kyuubi.engine.jdbc.session.initialize.sql || SemiColon-separated list of SQL statements to be initialized in the newly created engine session before queries. | seq | 1.8.0 | +| kyuubi.engine.jdbc.type | <undefined> | The short name of JDBC type | string | 1.6.0 | +| kyuubi.engine.kubernetes.submit.timeout | PT30S | The engine submit timeout for Kubernetes application. | duration | 1.7.2 | +| kyuubi.engine.operation.convert.catalog.database.enabled | true | When set to true, The engine converts the JDBC methods of set/get Catalog and set/get Schema to the implementation of different engines | boolean | 1.6.0 | +| kyuubi.engine.operation.log.dir.root | engine_operation_logs | Root directory for query operation log at engine-side. | string | 1.4.0 | +| kyuubi.engine.pool.name | engine-pool | The name of the engine pool. | string | 1.5.0 | +| kyuubi.engine.pool.selectPolicy | RANDOM | The select policy of an engine from the corresponding engine pool engine for a session.
  • RANDOM - Randomly select an engine from the pool
  • POLLING - Select engines from the pool in a round-robin (polling) manner
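For illustration, a small `kyuubi-defaults.conf` sketch enabling an engine pool with round-robin selection (the pool size of 3 is an arbitrary example value):

```properties
# A value >= 1 enables the pool; the effective size is capped by kyuubi.engine.pool.size.threshold
kyuubi.engine.pool.size=3
kyuubi.engine.pool.selectPolicy=POLLING
```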
| string | 1.7.0 | +| kyuubi.engine.pool.size | -1 | The size of the engine pool. Note that, if the size is less than 1, the engine pool will not be enabled; otherwise, the size of the engine pool will be min(this, kyuubi.engine.pool.size.threshold). | int | 1.4.0 | +| kyuubi.engine.pool.size.threshold | 9 | This parameter is introduced as a server-side parameter controlling the upper limit of the engine pool. | int | 1.4.0 | +| kyuubi.engine.session.initialize.sql || SemiColon-separated list of SQL statements to be initialized in the newly created engine session before queries. This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver. | seq | 1.3.0 | +| kyuubi.engine.share.level | USER | Engines will be shared in different levels, available configs are:
  • CONNECTION: the engine will not be shared; it is only used by the current client connection
  • USER: the engine will be shared by all sessions created by a unique username, see also kyuubi.engine.share.level.subdomain
  • GROUP: the engine will be shared by all sessions created by all users belonging to the same primary group. The engine will be launched with the group name as the effective username, so here the group name acts as a special user who is able to access the computing resources/data of the team. It follows the [Hadoop GroupsMapping](https://reurl.cc/xE61Y5) to map a user to a primary group. If the primary group is not found, it falls back to the USER level.
  • SERVER: the engine will be shared by Kyuubi servers
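For illustration, a `kyuubi-defaults.conf` sketch sharing one engine per primary group, partitioned by a subdomain (the subdomain name `etl` is a made-up example):

```properties
# One engine per Hadoop primary group, further split by an explicit subdomain
kyuubi.engine.share.level=GROUP
kyuubi.engine.share.level.subdomain=etl
```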
| string | 1.2.0 | +| kyuubi.engine.share.level.sub.domain | <undefined> | (deprecated) - Using kyuubi.engine.share.level.subdomain instead | string | 1.2.0 | +| kyuubi.engine.share.level.subdomain | <undefined> | Allow end-users to create a subdomain for the share level of an engine. A subdomain is a case-insensitive string values that must be a valid zookeeper subpath. For example, for the `USER` share level, an end-user can share a certain engine within a subdomain, not for all of its clients. End-users are free to create multiple engines in the `USER` share level. When disable engine pool, use 'default' if absent. | string | 1.4.0 | +| kyuubi.engine.single.spark.session | false | When set to true, this engine is running in a single session mode. All the JDBC/ODBC connections share the temporary views, function registries, SQL configuration and the current database. | boolean | 1.3.0 | +| kyuubi.engine.spark.event.loggers | SPARK | A comma-separated list of engine loggers, where engine/session/operation etc events go.
  • SPARK: the events will be written to the Spark listener bus.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | +| kyuubi.engine.spark.python.env.archive | <undefined> | Portable Python env archive used for Spark engine Python language mode. | string | 1.7.0 | +| kyuubi.engine.spark.python.env.archive.exec.path | bin/python | The Python exec path under the Python env archive. | string | 1.7.0 | +| kyuubi.engine.spark.python.home.archive | <undefined> | Spark archive containing $SPARK_HOME/python directory, which is used to init session Python worker for Python language mode. | string | 1.7.0 | +| kyuubi.engine.submit.timeout | PT30S | Period to tolerant Driver Pod ephemerally invisible after submitting. In some Resource Managers, e.g. K8s, the Driver Pod is not visible immediately after `spark-submit` is returned. | duration | 1.7.1 | +| kyuubi.engine.trino.connection.keystore.password | <undefined> | The keystore password used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.keystore.path | <undefined> | The keystore path used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.keystore.type | <undefined> | The keystore type used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.password | <undefined> | The password used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.truststore.password | <undefined> | The truststore password used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.truststore.path | <undefined> | The truststore path used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.connection.truststore.type | <undefined> | The truststore type used for connecting to trino cluster | string | 1.8.0 | +| kyuubi.engine.trino.event.loggers | JSON | A comma-separated list of engine history loggers, where engine/session/operation etc events go.
  • JSON: the events will be written to the location of kyuubi.engine.event.json.log.path
  • JDBC: to be done
  • CUSTOM: to be done.
| seq | 1.7.0 | +| kyuubi.engine.trino.extra.classpath | <undefined> | The extra classpath for the Trino query engine, for configuring other libs which may need by the Trino engine | string | 1.6.0 | +| kyuubi.engine.trino.java.options | <undefined> | The extra Java options for the Trino query engine | string | 1.6.0 | +| kyuubi.engine.trino.memory | 1g | The heap memory for the Trino query engine | string | 1.6.0 | +| kyuubi.engine.type | SPARK_SQL | Specify the detailed engine supported by Kyuubi. The engine type bindings to SESSION scope. This configuration is experimental. Currently, available configs are:
  • SPARK_SQL: specifying this engine type will launch a Spark engine which provides all the capabilities of Apache Spark. Note that it is the default engine type.
  • FLINK_SQL: specifying this engine type will launch a Flink engine which provides all the capabilities of Apache Flink.
  • TRINO: specifying this engine type will launch a Trino engine which provides all the capabilities of Trino.
  • HIVE_SQL: specifying this engine type will launch a Hive engine which provides all the capabilities of HiveServer2.
  • JDBC: specifying this engine type will launch a JDBC engine which provides a MySQL protocol connector; for now, only the Doris dialect is supported.
  • CHAT: specifying this engine type will launch a Chat engine.
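For illustration, switching the session engine type away from the default is a one-line change in `kyuubi-defaults.conf` (or in the session configuration):

```properties
# Launch a Flink SQL engine instead of the default Spark SQL engine
kyuubi.engine.type=FLINK_SQL
```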
| string | 1.4.0 | +| kyuubi.engine.ui.retainedSessions | 200 | The number of SQL client sessions kept in the Kyuubi Query Engine web UI. | int | 1.4.0 | +| kyuubi.engine.ui.retainedStatements | 200 | The number of statements kept in the Kyuubi Query Engine web UI. | int | 1.4.0 | +| kyuubi.engine.ui.stop.enabled | true | When true, allows Kyuubi engine to be killed from the Spark Web UI. | boolean | 1.3.0 | +| kyuubi.engine.user.isolated.spark.session | true | When set to false, if the engine is running in a group or server share level, all the JDBC/ODBC connections will be isolated against the user. Including the temporary views, function registries, SQL configuration, and the current database. Note that, it does not affect if the share level is connection or user. | boolean | 1.6.0 | +| kyuubi.engine.user.isolated.spark.session.idle.interval | PT1M | The interval to check if the user-isolated Spark session is timeout. | duration | 1.6.0 | +| kyuubi.engine.user.isolated.spark.session.idle.timeout | PT6H | If kyuubi.engine.user.isolated.spark.session is false, we will release the Spark session if its corresponding user is inactive after this configured timeout. | duration | 1.6.0 | +| kyuubi.engine.yarn.submit.timeout | PT30S | The engine submit timeout for YARN application. | duration | 1.7.2 | ### Event @@ -182,94 +207,96 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co ### Frontend -| Key | Default | Meaning | Type | Since | -|--------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.frontend.backoff.slot.length | PT0.1S | (deprecated) Time to back off during login to the thrift frontend service. | duration | 1.0.0 | -| kyuubi.frontend.bind.host | <undefined> | Hostname or IP of the machine on which to run the frontend services. | string | 1.0.0 | -| kyuubi.frontend.bind.port | 10009 | (deprecated) Port of the machine on which to run the thrift frontend service via the binary protocol. | int | 1.0.0 | -| kyuubi.frontend.connection.url.use.hostname | true | When true, frontend services prefer hostname, otherwise, ip address. Note that, the default value is set to `false` when engine running on Kubernetes to prevent potential network issues. | boolean | 1.5.0 | -| kyuubi.frontend.login.timeout | PT20S | (deprecated) Timeout for Thrift clients during login to the thrift frontend service. | duration | 1.0.0 | -| kyuubi.frontend.max.message.size | 104857600 | (deprecated) Maximum message size in bytes a Kyuubi server will accept. | int | 1.0.0 | -| kyuubi.frontend.max.worker.threads | 999 | (deprecated) Maximum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.0.0 | -| kyuubi.frontend.min.worker.threads | 9 | (deprecated) Minimum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.0.0 | -| kyuubi.frontend.mysql.bind.host | <undefined> | Hostname or IP of the machine on which to run the MySQL frontend service. 
| string | 1.4.0 | -| kyuubi.frontend.mysql.bind.port | 3309 | Port of the machine on which to run the MySQL frontend service. | int | 1.4.0 | -| kyuubi.frontend.mysql.max.worker.threads | 999 | Maximum number of threads in the command execution thread pool for the MySQL frontend service | int | 1.4.0 | -| kyuubi.frontend.mysql.min.worker.threads | 9 | Minimum number of threads in the command execution thread pool for the MySQL frontend service | int | 1.4.0 | -| kyuubi.frontend.mysql.netty.worker.threads | <undefined> | Number of thread in the netty worker event loop of MySQL frontend service. Use min(cpu_cores, 8) in default. | int | 1.4.0 | -| kyuubi.frontend.mysql.worker.keepalive.time | PT1M | Time(ms) that an idle async thread of the command execution thread pool will wait for a new task to arrive before terminating in MySQL frontend service | duration | 1.4.0 | -| kyuubi.frontend.protocols | THRIFT_BINARY | A comma-separated list for all frontend protocols
  • THRIFT_BINARY - HiveServer2 compatible thrift binary protocol.
  • THRIFT_HTTP - HiveServer2 compatible thrift http protocol.
  • REST - Kyuubi defined REST API(experimental).
  • MYSQL - MySQL compatible text protocol(experimental).
  • TRINO - Trino compatible http protocol(experimental).
| seq | 1.4.0 | -| kyuubi.frontend.proxy.http.client.ip.header | X-Real-IP | The HTTP header to record the real client IP address. If your server is behind a load balancer or other proxy, the server will see this load balancer or proxy IP address as the client IP address, to get around this common issue, most load balancers or proxies offer the ability to record the real remote IP address in an HTTP header that will be added to the request for other devices to use. Note that, because the header value can be specified to any IP address, so it will not be used for authentication. | string | 1.6.0 | -| kyuubi.frontend.rest.bind.host | <undefined> | Hostname or IP of the machine on which to run the REST frontend service. | string | 1.4.0 | -| kyuubi.frontend.rest.bind.port | 10099 | Port of the machine on which to run the REST frontend service. | int | 1.4.0 | -| kyuubi.frontend.rest.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the rest frontend service | int | 1.6.2 | -| kyuubi.frontend.ssl.keystore.algorithm | <undefined> | SSL certificate keystore algorithm. | string | 1.7.0 | -| kyuubi.frontend.ssl.keystore.password | <undefined> | SSL certificate keystore password. | string | 1.7.0 | -| kyuubi.frontend.ssl.keystore.path | <undefined> | SSL certificate keystore location. | string | 1.7.0 | -| kyuubi.frontend.ssl.keystore.type | <undefined> | SSL certificate keystore type. | string | 1.7.0 | -| kyuubi.frontend.thrift.backoff.slot.length | PT0.1S | Time to back off during login to the thrift frontend service. | duration | 1.4.0 | -| kyuubi.frontend.thrift.binary.bind.host | <undefined> | Hostname or IP of the machine on which to run the thrift frontend service via the binary protocol. | string | 1.4.0 | -| kyuubi.frontend.thrift.binary.bind.port | 10009 | Port of the machine on which to run the thrift frontend service via the binary protocol. | int | 1.4.0 | -| kyuubi.frontend.thrift.binary.ssl.disallowed.protocols | SSLv2,SSLv3 | SSL versions to disallow for Kyuubi thrift binary frontend. | seq | 1.7.0 | -| kyuubi.frontend.thrift.binary.ssl.enabled | false | Set this to true for using SSL encryption in thrift binary frontend server. | boolean | 1.7.0 | -| kyuubi.frontend.thrift.binary.ssl.include.ciphersuites || A comma-separated list of include SSL cipher suite names for thrift binary frontend. | seq | 1.7.0 | -| kyuubi.frontend.thrift.http.allow.user.substitution | true | Allow alternate user to be specified as part of open connection request when using HTTP transport mode. | boolean | 1.6.0 | -| kyuubi.frontend.thrift.http.bind.host | <undefined> | Hostname or IP of the machine on which to run the thrift frontend service via http protocol. | string | 1.6.0 | -| kyuubi.frontend.thrift.http.bind.port | 10010 | Port of the machine on which to run the thrift frontend service via http protocol. | int | 1.6.0 | -| kyuubi.frontend.thrift.http.compression.enabled | true | Enable thrift http compression via Jetty compression support | boolean | 1.6.0 | -| kyuubi.frontend.thrift.http.cookie.auth.enabled | true | When true, Kyuubi in HTTP transport mode, will use cookie-based authentication mechanism | boolean | 1.6.0 | -| kyuubi.frontend.thrift.http.cookie.domain | <undefined> | Domain for the Kyuubi generated cookies | string | 1.6.0 | -| kyuubi.frontend.thrift.http.cookie.is.httponly | true | HttpOnly attribute of the Kyuubi generated cookie. 
| boolean | 1.6.0 | -| kyuubi.frontend.thrift.http.cookie.max.age | 86400 | Maximum age in seconds for server side cookie used by Kyuubi in HTTP mode. | int | 1.6.0 | -| kyuubi.frontend.thrift.http.cookie.path | <undefined> | Path for the Kyuubi generated cookies | string | 1.6.0 | -| kyuubi.frontend.thrift.http.max.idle.time | PT30M | Maximum idle time for a connection on the server when in HTTP mode. | duration | 1.6.0 | -| kyuubi.frontend.thrift.http.path | cliservice | Path component of URL endpoint when in HTTP mode. | string | 1.6.0 | -| kyuubi.frontend.thrift.http.request.header.size | 6144 | Request header size in bytes, when using HTTP transport mode. Jetty defaults used. | int | 1.6.0 | -| kyuubi.frontend.thrift.http.response.header.size | 6144 | Response header size in bytes, when using HTTP transport mode. Jetty defaults used. | int | 1.6.0 | -| kyuubi.frontend.thrift.http.ssl.exclude.ciphersuites || A comma-separated list of exclude SSL cipher suite names for thrift http frontend. | seq | 1.7.0 | -| kyuubi.frontend.thrift.http.ssl.keystore.password | <undefined> | SSL certificate keystore password. | string | 1.6.0 | -| kyuubi.frontend.thrift.http.ssl.keystore.path | <undefined> | SSL certificate keystore location. | string | 1.6.0 | -| kyuubi.frontend.thrift.http.ssl.protocol.blacklist | SSLv2,SSLv3 | SSL Versions to disable when using HTTP transport mode. | seq | 1.6.0 | -| kyuubi.frontend.thrift.http.use.SSL | false | Set this to true for using SSL encryption in http mode. | boolean | 1.6.0 | -| kyuubi.frontend.thrift.http.xsrf.filter.enabled | false | If enabled, Kyuubi will block any requests made to it over HTTP if an X-XSRF-HEADER header is not present | boolean | 1.6.0 | -| kyuubi.frontend.thrift.login.timeout | PT20S | Timeout for Thrift clients during login to the thrift frontend service. | duration | 1.4.0 | -| kyuubi.frontend.thrift.max.message.size | 104857600 | Maximum message size in bytes a Kyuubi server will accept. | int | 1.4.0 | -| kyuubi.frontend.thrift.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.4.0 | -| kyuubi.frontend.thrift.min.worker.threads | 9 | Minimum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.4.0 | -| kyuubi.frontend.thrift.worker.keepalive.time | PT1M | Keep-alive time (in milliseconds) for an idle worker thread | duration | 1.4.0 | -| kyuubi.frontend.trino.bind.host | <undefined> | Hostname or IP of the machine on which to run the TRINO frontend service. | string | 1.7.0 | -| kyuubi.frontend.trino.bind.port | 10999 | Port of the machine on which to run the TRINO frontend service. 
| int | 1.7.0 | -| kyuubi.frontend.trino.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the Trino frontend service | int | 1.7.0 | -| kyuubi.frontend.worker.keepalive.time | PT1M | (deprecated) Keep-alive time (in milliseconds) for an idle worker thread | duration | 1.0.0 | +| Key | Default | Meaning | Type | Since | +|--------------------------------------------------------|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.frontend.advertised.host | <undefined> | Hostname or IP of the Kyuubi server's frontend services to publish to external systems such as the service discovery ensemble and metadata store. Use it when you want to advertise a different hostname or IP than the bind host. | string | 1.8.0 | +| kyuubi.frontend.backoff.slot.length | PT0.1S | (deprecated) Time to back off during login to the thrift frontend service. | duration | 1.0.0 | +| kyuubi.frontend.bind.host | <undefined> | Hostname or IP of the machine on which to run the frontend services. | string | 1.0.0 | +| kyuubi.frontend.bind.port | 10009 | (deprecated) Port of the machine on which to run the thrift frontend service via the binary protocol. | int | 1.0.0 | +| kyuubi.frontend.connection.url.use.hostname | true | When true, frontend services prefer hostname, otherwise, ip address. Note that, the default value is set to `false` when engine running on Kubernetes to prevent potential network issues. | boolean | 1.5.0 | +| kyuubi.frontend.login.timeout | PT20S | (deprecated) Timeout for Thrift clients during login to the thrift frontend service. | duration | 1.0.0 | +| kyuubi.frontend.max.message.size | 104857600 | (deprecated) Maximum message size in bytes a Kyuubi server will accept. | int | 1.0.0 | +| kyuubi.frontend.max.worker.threads | 999 | (deprecated) Maximum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.0.0 | +| kyuubi.frontend.min.worker.threads | 9 | (deprecated) Minimum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.0.0 | +| kyuubi.frontend.mysql.bind.host | <undefined> | Hostname or IP of the machine on which to run the MySQL frontend service. | string | 1.4.0 | +| kyuubi.frontend.mysql.bind.port | 3309 | Port of the machine on which to run the MySQL frontend service. | int | 1.4.0 | +| kyuubi.frontend.mysql.max.worker.threads | 999 | Maximum number of threads in the command execution thread pool for the MySQL frontend service | int | 1.4.0 | +| kyuubi.frontend.mysql.min.worker.threads | 9 | Minimum number of threads in the command execution thread pool for the MySQL frontend service | int | 1.4.0 | +| kyuubi.frontend.mysql.netty.worker.threads | <undefined> | Number of thread in the netty worker event loop of MySQL frontend service. Use min(cpu_cores, 8) in default. 
| int | 1.4.0 | +| kyuubi.frontend.mysql.worker.keepalive.time | PT1M | Time(ms) that an idle async thread of the command execution thread pool will wait for a new task to arrive before terminating in MySQL frontend service | duration | 1.4.0 | +| kyuubi.frontend.protocols | THRIFT_BINARY,REST | A comma-separated list for all frontend protocols
  • THRIFT_BINARY - HiveServer2 compatible thrift binary protocol.
  • THRIFT_HTTP - HiveServer2 compatible thrift http protocol.
  • REST - Kyuubi defined REST API (experimental).
  • MYSQL - MySQL compatible text protocol (experimental).
  • TRINO - Trino compatible http protocol (experimental).
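For illustration, a `kyuubi-defaults.conf` sketch exposing the Thrift binary, Thrift HTTP, and REST frontends together (the ports shown are the documented defaults):

```properties
kyuubi.frontend.protocols=THRIFT_BINARY,THRIFT_HTTP,REST
kyuubi.frontend.thrift.binary.bind.port=10009
kyuubi.frontend.thrift.http.bind.port=10010
kyuubi.frontend.rest.bind.port=10099
```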
| seq | 1.4.0 | +| kyuubi.frontend.proxy.http.client.ip.header | X-Real-IP | The HTTP header to record the real client IP address. If your server is behind a load balancer or other proxy, the server will see this load balancer or proxy IP address as the client IP address, to get around this common issue, most load balancers or proxies offer the ability to record the real remote IP address in an HTTP header that will be added to the request for other devices to use. Note that, because the header value can be specified to any IP address, so it will not be used for authentication. | string | 1.6.0 | +| kyuubi.frontend.rest.bind.host | <undefined> | Hostname or IP of the machine on which to run the REST frontend service. | string | 1.4.0 | +| kyuubi.frontend.rest.bind.port | 10099 | Port of the machine on which to run the REST frontend service. | int | 1.4.0 | +| kyuubi.frontend.rest.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the rest frontend service | int | 1.6.2 | +| kyuubi.frontend.ssl.keystore.algorithm | <undefined> | SSL certificate keystore algorithm. | string | 1.7.0 | +| kyuubi.frontend.ssl.keystore.password | <undefined> | SSL certificate keystore password. | string | 1.7.0 | +| kyuubi.frontend.ssl.keystore.path | <undefined> | SSL certificate keystore location. | string | 1.7.0 | +| kyuubi.frontend.ssl.keystore.type | <undefined> | SSL certificate keystore type. | string | 1.7.0 | +| kyuubi.frontend.thrift.backoff.slot.length | PT0.1S | Time to back off during login to the thrift frontend service. | duration | 1.4.0 | +| kyuubi.frontend.thrift.binary.bind.host | <undefined> | Hostname or IP of the machine on which to run the thrift frontend service via the binary protocol. | string | 1.4.0 | +| kyuubi.frontend.thrift.binary.bind.port | 10009 | Port of the machine on which to run the thrift frontend service via the binary protocol. | int | 1.4.0 | +| kyuubi.frontend.thrift.binary.ssl.disallowed.protocols | SSLv2,SSLv3 | SSL versions to disallow for Kyuubi thrift binary frontend. | set | 1.7.0 | +| kyuubi.frontend.thrift.binary.ssl.enabled | false | Set this to true for using SSL encryption in thrift binary frontend server. | boolean | 1.7.0 | +| kyuubi.frontend.thrift.binary.ssl.include.ciphersuites || A comma-separated list of include SSL cipher suite names for thrift binary frontend. | seq | 1.7.0 | +| kyuubi.frontend.thrift.http.allow.user.substitution | true | Allow alternate user to be specified as part of open connection request when using HTTP transport mode. | boolean | 1.6.0 | +| kyuubi.frontend.thrift.http.bind.host | <undefined> | Hostname or IP of the machine on which to run the thrift frontend service via http protocol. | string | 1.6.0 | +| kyuubi.frontend.thrift.http.bind.port | 10010 | Port of the machine on which to run the thrift frontend service via http protocol. | int | 1.6.0 | +| kyuubi.frontend.thrift.http.compression.enabled | true | Enable thrift http compression via Jetty compression support | boolean | 1.6.0 | +| kyuubi.frontend.thrift.http.cookie.auth.enabled | true | When true, Kyuubi in HTTP transport mode, will use cookie-based authentication mechanism | boolean | 1.6.0 | +| kyuubi.frontend.thrift.http.cookie.domain | <undefined> | Domain for the Kyuubi generated cookies | string | 1.6.0 | +| kyuubi.frontend.thrift.http.cookie.is.httponly | true | HttpOnly attribute of the Kyuubi generated cookie. 
| boolean | 1.6.0 | +| kyuubi.frontend.thrift.http.cookie.max.age | 86400 | Maximum age in seconds for server side cookie used by Kyuubi in HTTP mode. | int | 1.6.0 | +| kyuubi.frontend.thrift.http.cookie.path | <undefined> | Path for the Kyuubi generated cookies | string | 1.6.0 | +| kyuubi.frontend.thrift.http.max.idle.time | PT30M | Maximum idle time for a connection on the server when in HTTP mode. | duration | 1.6.0 | +| kyuubi.frontend.thrift.http.path | cliservice | Path component of URL endpoint when in HTTP mode. | string | 1.6.0 | +| kyuubi.frontend.thrift.http.request.header.size | 6144 | Request header size in bytes, when using HTTP transport mode. Jetty defaults used. | int | 1.6.0 | +| kyuubi.frontend.thrift.http.response.header.size | 6144 | Response header size in bytes, when using HTTP transport mode. Jetty defaults used. | int | 1.6.0 | +| kyuubi.frontend.thrift.http.ssl.exclude.ciphersuites || A comma-separated list of exclude SSL cipher suite names for thrift http frontend. | seq | 1.7.0 | +| kyuubi.frontend.thrift.http.ssl.keystore.password | <undefined> | SSL certificate keystore password. | string | 1.6.0 | +| kyuubi.frontend.thrift.http.ssl.keystore.path | <undefined> | SSL certificate keystore location. | string | 1.6.0 | +| kyuubi.frontend.thrift.http.ssl.protocol.blacklist | SSLv2,SSLv3 | SSL Versions to disable when using HTTP transport mode. | seq | 1.6.0 | +| kyuubi.frontend.thrift.http.use.SSL | false | Set this to true for using SSL encryption in http mode. | boolean | 1.6.0 | +| kyuubi.frontend.thrift.http.xsrf.filter.enabled | false | If enabled, Kyuubi will block any requests made to it over HTTP if an X-XSRF-HEADER header is not present | boolean | 1.6.0 | +| kyuubi.frontend.thrift.login.timeout | PT20S | Timeout for Thrift clients during login to the thrift frontend service. | duration | 1.4.0 | +| kyuubi.frontend.thrift.max.message.size | 104857600 | Maximum message size in bytes a Kyuubi server will accept. | int | 1.4.0 | +| kyuubi.frontend.thrift.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.4.0 | +| kyuubi.frontend.thrift.min.worker.threads | 9 | Minimum number of threads in the frontend worker thread pool for the thrift frontend service | int | 1.4.0 | +| kyuubi.frontend.thrift.worker.keepalive.time | PT1M | Keep-alive time (in milliseconds) for an idle worker thread | duration | 1.4.0 | +| kyuubi.frontend.trino.bind.host | <undefined> | Hostname or IP of the machine on which to run the TRINO frontend service. | string | 1.7.0 | +| kyuubi.frontend.trino.bind.port | 10999 | Port of the machine on which to run the TRINO frontend service. 
| int | 1.7.0 | +| kyuubi.frontend.trino.max.worker.threads | 999 | Maximum number of threads in the frontend worker thread pool for the Trino frontend service | int | 1.7.0 | +| kyuubi.frontend.worker.keepalive.time | PT1M | (deprecated) Keep-alive time (in milliseconds) for an idle worker thread | duration | 1.0.0 | ### Ha -| Key | Default | Meaning | Type | Since | -|------------------------------------------------|----------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.ha.addresses || The connection string for the discovery ensemble | string | 1.6.0 | -| kyuubi.ha.client.class | org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient | Class name for service discovery client.
  • Zookeeper: org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient
  • Etcd: org.apache.kyuubi.ha.client.etcd.EtcdDiscoveryClient
| string | 1.6.0 | -| kyuubi.ha.etcd.lease.timeout | PT10S | Timeout for etcd keep alive lease. The kyuubi server will know the unexpected loss of engine after up to this seconds. | duration | 1.6.0 | -| kyuubi.ha.etcd.ssl.ca.path | <undefined> | Where the etcd CA certificate file is stored. | string | 1.6.0 | -| kyuubi.ha.etcd.ssl.client.certificate.path | <undefined> | Where the etcd SSL certificate file is stored. | string | 1.6.0 | -| kyuubi.ha.etcd.ssl.client.key.path | <undefined> | Where the etcd SSL key file is stored. | string | 1.6.0 | -| kyuubi.ha.etcd.ssl.enabled | false | When set to true, will build an SSL secured etcd client. | boolean | 1.6.0 | -| kyuubi.ha.namespace | kyuubi | The root directory for the service to deploy its instance uri | string | 1.6.0 | -| kyuubi.ha.zookeeper.acl.enabled | false | Set to true if the ZooKeeper ensemble is kerberized | boolean | 1.0.0 | -| kyuubi.ha.zookeeper.auth.digest | <undefined> | The digest auth string is used for ZooKeeper authentication, like: username:password. | string | 1.3.2 | -| kyuubi.ha.zookeeper.auth.keytab | <undefined> | Location of the Kyuubi server's keytab is used for ZooKeeper authentication. | string | 1.3.2 | -| kyuubi.ha.zookeeper.auth.principal | <undefined> | Name of the Kerberos principal is used for ZooKeeper authentication. | string | 1.3.2 | -| kyuubi.ha.zookeeper.auth.type | NONE | The type of ZooKeeper authentication, all candidates are
  • NONE
  • KERBEROS
  • DIGEST
| string | 1.3.2 | -| kyuubi.ha.zookeeper.connection.base.retry.wait | 1000 | Initial amount of time to wait between retries to the ZooKeeper ensemble | int | 1.0.0 | -| kyuubi.ha.zookeeper.connection.max.retries | 3 | Max retry times for connecting to the ZooKeeper ensemble | int | 1.0.0 | -| kyuubi.ha.zookeeper.connection.max.retry.wait | 30000 | Max amount of time to wait between retries for BOUNDED_EXPONENTIAL_BACKOFF policy can reach, or max time until elapsed for UNTIL_ELAPSED policy to connect the zookeeper ensemble | int | 1.0.0 | -| kyuubi.ha.zookeeper.connection.retry.policy | EXPONENTIAL_BACKOFF | The retry policy for connecting to the ZooKeeper ensemble, all candidates are:
  • ONE_TIME
  • N_TIME
  • EXPONENTIAL_BACKOFF
  • BOUNDED_EXPONENTIAL_BACKOFF
  • UNTIL_ELAPSED
| string | 1.0.0 | -| kyuubi.ha.zookeeper.connection.timeout | 15000 | The timeout(ms) of creating the connection to the ZooKeeper ensemble | int | 1.0.0 | -| kyuubi.ha.zookeeper.engine.auth.type | NONE | The type of ZooKeeper authentication for the engine, all candidates are
  • NONE
  • KERBEROS
  • DIGEST
| string | 1.3.2 | -| kyuubi.ha.zookeeper.namespace | kyuubi | (deprecated) The root directory for the service to deploy its instance uri | string | 1.0.0 | -| kyuubi.ha.zookeeper.node.creation.timeout | PT2M | Timeout for creating ZooKeeper node | duration | 1.2.0 | -| kyuubi.ha.zookeeper.publish.configs | false | When set to true, publish Kerberos configs to Zookeeper. Note that the Hive driver needs to be greater than 1.3 or 2.0 or apply HIVE-11581 patch. | boolean | 1.4.0 | -| kyuubi.ha.zookeeper.quorum || (deprecated) The connection string for the ZooKeeper ensemble | string | 1.0.0 | -| kyuubi.ha.zookeeper.session.timeout | 60000 | The timeout(ms) of a connected session to be idled | int | 1.0.0 | +| Key | Default | Meaning | Type | Since | +|------------------------------------------------|----------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.ha.addresses || The connection string for the discovery ensemble | string | 1.6.0 | +| kyuubi.ha.client.class | org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient | Class name for service discovery client.
  • Zookeeper: org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient
  • Etcd: org.apache.kyuubi.ha.client.etcd.EtcdDiscoveryClient
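For illustration, a sketch that points service discovery at an etcd ensemble instead of the default ZooKeeper client; the endpoint addresses are placeholders:

```properties
kyuubi.ha.client.class=org.apache.kyuubi.ha.client.etcd.EtcdDiscoveryClient
# Placeholder etcd endpoints for the discovery ensemble
kyuubi.ha.addresses=http://etcd-1:2379,http://etcd-2:2379
kyuubi.ha.namespace=kyuubi
```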
| string | 1.6.0 | +| kyuubi.ha.etcd.lease.timeout | PT10S | Timeout for etcd keep alive lease. The kyuubi server will know the unexpected loss of engine after up to this seconds. | duration | 1.6.0 | +| kyuubi.ha.etcd.ssl.ca.path | <undefined> | Where the etcd CA certificate file is stored. | string | 1.6.0 | +| kyuubi.ha.etcd.ssl.client.certificate.path | <undefined> | Where the etcd SSL certificate file is stored. | string | 1.6.0 | +| kyuubi.ha.etcd.ssl.client.key.path | <undefined> | Where the etcd SSL key file is stored. | string | 1.6.0 | +| kyuubi.ha.etcd.ssl.enabled | false | When set to true, will build an SSL secured etcd client. | boolean | 1.6.0 | +| kyuubi.ha.namespace | kyuubi | The root directory for the service to deploy its instance uri | string | 1.6.0 | +| kyuubi.ha.zookeeper.acl.enabled | false | Set to true if the ZooKeeper ensemble is kerberized | boolean | 1.0.0 | +| kyuubi.ha.zookeeper.auth.digest | <undefined> | The digest auth string is used for ZooKeeper authentication, like: username:password. | string | 1.3.2 | +| kyuubi.ha.zookeeper.auth.keytab | <undefined> | Location of the Kyuubi server's keytab that is used for ZooKeeper authentication. | string | 1.3.2 | +| kyuubi.ha.zookeeper.auth.principal | <undefined> | Kerberos principal name that is used for ZooKeeper authentication. | string | 1.3.2 | +| kyuubi.ha.zookeeper.auth.serverPrincipal | <undefined> | Kerberos principal name of ZooKeeper Server. It only takes effect when Zookeeper client's version at least 3.5.7 or 3.6.0 or applies ZOOKEEPER-1467. To use Zookeeper 3.6 client, compile Kyuubi with `-Pzookeeper-3.6`. | string | 1.8.0 | +| kyuubi.ha.zookeeper.auth.type | NONE | The type of ZooKeeper authentication, all candidates are
  • NONE
  • KERBEROS
  • DIGEST
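For illustration, a sketch for a kerberized ZooKeeper ensemble; the principal and keytab values are placeholders:

```properties
kyuubi.ha.zookeeper.acl.enabled=true
kyuubi.ha.zookeeper.auth.type=KERBEROS
# Placeholder Kerberos credentials for the Kyuubi server
kyuubi.ha.zookeeper.auth.principal=kyuubi/_HOST@EXAMPLE.COM
kyuubi.ha.zookeeper.auth.keytab=/etc/security/keytabs/kyuubi.keytab
```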
| string | 1.3.2 | +| kyuubi.ha.zookeeper.connection.base.retry.wait | 1000 | Initial amount of time to wait between retries to the ZooKeeper ensemble | int | 1.0.0 | +| kyuubi.ha.zookeeper.connection.max.retries | 3 | Max retry times for connecting to the ZooKeeper ensemble | int | 1.0.0 | +| kyuubi.ha.zookeeper.connection.max.retry.wait | 30000 | Max amount of time to wait between retries for BOUNDED_EXPONENTIAL_BACKOFF policy can reach, or max time until elapsed for UNTIL_ELAPSED policy to connect the zookeeper ensemble | int | 1.0.0 | +| kyuubi.ha.zookeeper.connection.retry.policy | EXPONENTIAL_BACKOFF | The retry policy for connecting to the ZooKeeper ensemble, all candidates are:
  • ONE_TIME
  • N_TIME
  • EXPONENTIAL_BACKOFF
  • BOUNDED_EXPONENTIAL_BACKOFF
  • UNTIL_ELAPSED
| string | 1.0.0 | +| kyuubi.ha.zookeeper.connection.timeout | 15000 | The timeout(ms) of creating the connection to the ZooKeeper ensemble | int | 1.0.0 | +| kyuubi.ha.zookeeper.engine.auth.type | NONE | The type of ZooKeeper authentication for the engine, all candidates are
  • NONE
  • KERBEROS
  • DIGEST
| string | 1.3.2 | +| kyuubi.ha.zookeeper.namespace | kyuubi | (deprecated) The root directory for the service to deploy its instance uri | string | 1.0.0 | +| kyuubi.ha.zookeeper.node.creation.timeout | PT2M | Timeout for creating ZooKeeper node | duration | 1.2.0 | +| kyuubi.ha.zookeeper.publish.configs | false | When set to true, publish Kerberos configs to Zookeeper. Note that the Hive driver needs to be greater than 1.3 or 2.0 or apply HIVE-11581 patch. | boolean | 1.4.0 | +| kyuubi.ha.zookeeper.quorum || (deprecated) The connection string for the ZooKeeper ensemble | string | 1.0.0 | +| kyuubi.ha.zookeeper.session.timeout | 60000 | The timeout(ms) of a connected session to be idled | int | 1.0.0 | ### Kinit @@ -282,37 +309,46 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co ### Kubernetes -| Key | Default | Meaning | Type | Since | -|-----------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-------| -| kyuubi.kubernetes.authenticate.caCertFile | <undefined> | Path to the CA cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | -| kyuubi.kubernetes.authenticate.clientCertFile | <undefined> | Path to the client cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | -| kyuubi.kubernetes.authenticate.clientKeyFile | <undefined> | Path to the client key file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | -| kyuubi.kubernetes.authenticate.oauthToken | <undefined> | The OAuth token to use when authenticating against the Kubernetes API server. Note that unlike, the other authentication options, this must be the exact string value of the token to use for the authentication. | string | 1.7.0 | -| kyuubi.kubernetes.authenticate.oauthTokenFile | <undefined> | Path to the file containing the OAuth token to use when authenticating against the Kubernetes API server. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | -| kyuubi.kubernetes.context | <undefined> | The desired context from your kubernetes config file used to configure the K8s client for interacting with the cluster. | string | 1.6.0 | -| kyuubi.kubernetes.master.address | <undefined> | The internal Kubernetes master (API server) address to be used for kyuubi. | string | 1.7.0 | -| kyuubi.kubernetes.namespace | default | The namespace that will be used for running the kyuubi pods and find engines. 
| string | 1.7.0 | -| kyuubi.kubernetes.trust.certificates | false | If set to true then client can submit to kubernetes cluster only with token | boolean | 1.7.0 | +| Key | Default | Meaning | Type | Since | +|-----------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.kubernetes.authenticate.caCertFile | <undefined> | Path to the CA cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | +| kyuubi.kubernetes.authenticate.clientCertFile | <undefined> | Path to the client cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | +| kyuubi.kubernetes.authenticate.clientKeyFile | <undefined> | Path to the client key file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | +| kyuubi.kubernetes.authenticate.oauthToken | <undefined> | The OAuth token to use when authenticating against the Kubernetes API server. Note that unlike, the other authentication options, this must be the exact string value of the token to use for the authentication. | string | 1.7.0 | +| kyuubi.kubernetes.authenticate.oauthTokenFile | <undefined> | Path to the file containing the OAuth token to use when authenticating against the Kubernetes API server. Specify this as a path as opposed to a URI (i.e. do not provide a scheme) | string | 1.7.0 | +| kyuubi.kubernetes.context | <undefined> | The desired context from your kubernetes config file used to configure the K8s client for interacting with the cluster. | string | 1.6.0 | +| kyuubi.kubernetes.context.allow.list || The allowed kubernetes context list, if it is empty, there is no kubernetes context limitation. | set | 1.8.0 | +| kyuubi.kubernetes.master.address | <undefined> | The internal Kubernetes master (API server) address to be used for kyuubi. | string | 1.7.0 | +| kyuubi.kubernetes.namespace | default | The namespace that will be used for running the kyuubi pods and find engines. | string | 1.7.0 | +| kyuubi.kubernetes.namespace.allow.list || The allowed kubernetes namespace list, if it is empty, there is no kubernetes namespace limitation. | set | 1.8.0 | +| kyuubi.kubernetes.terminatedApplicationRetainPeriod | PT5M | The period for which the Kyuubi server retains application information after the application terminates. | duration | 1.7.1 | +| kyuubi.kubernetes.trust.certificates | false | If set to true then client can submit to kubernetes cluster only with token | boolean | 1.7.0 | + +### Lineage + +| Key | Default | Meaning | Type | Since | +|---------------------------------------|--------------------------------------------------------|---------------------------------------------------|--------|-------| +| kyuubi.lineage.parser.plugin.provider | org.apache.kyuubi.plugin.lineage.LineageParserProvider | The provider for the Spark lineage parser plugin. 
| string | 1.8.0 | ### Metadata -| Key | Default | Meaning | Type | Since | -|-------------------------------------------------|----------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.metadata.cleaner.enabled | true | Whether to clean the metadata periodically. If it is enabled, Kyuubi will clean the metadata that is in the terminate state with max age limitation. | boolean | 1.6.0 | -| kyuubi.metadata.cleaner.interval | PT30M | The interval to check and clean expired metadata. | duration | 1.6.0 | -| kyuubi.metadata.max.age | PT72H | The maximum age of metadata, the metadata exceeding the age will be cleaned. | duration | 1.6.0 | -| kyuubi.metadata.recovery.threads | 10 | The number of threads for recovery from the metadata store when the Kyuubi server restarts. | int | 1.6.0 | -| kyuubi.metadata.request.async.retry.enabled | true | Whether to retry in async when metadata request failed. When true, return success response immediately even the metadata request failed, and schedule it in background until success, to tolerate long-time metadata store outages w/o blocking the submission request. | boolean | 1.7.0 | -| kyuubi.metadata.request.async.retry.queue.size | 65536 | The maximum queue size for buffering metadata requests in memory when the external metadata storage is down. Requests will be dropped if the queue exceeds. Only take affect when kyuubi.metadata.request.async.retry.enabled is `true`. | int | 1.6.0 | -| kyuubi.metadata.request.async.retry.threads | 10 | Number of threads in the metadata request async retry manager thread pool. Only take affect when kyuubi.metadata.request.async.retry.enabled is `true`. | int | 1.6.0 | -| kyuubi.metadata.request.retry.interval | PT5S | The interval to check and trigger the metadata request retry tasks. | duration | 1.6.0 | -| kyuubi.metadata.store.class | org.apache.kyuubi.server.metadata.jdbc.JDBCMetadataStore | Fully qualified class name for server metadata store. | string | 1.6.0 | -| kyuubi.metadata.store.jdbc.database.schema.init | true | Whether to init the JDBC metadata store database schema. | boolean | 1.6.0 | -| kyuubi.metadata.store.jdbc.database.type | DERBY | The database type for server jdbc metadata store.
  • DERBY: Apache Derby, JDBC driver `org.apache.derby.jdbc.AutoloadedDriver`.
  • MYSQL: MySQL, JDBC driver `com.mysql.jdbc.Driver`.
  • CUSTOM: User-defined database type, need to specify corresponding JDBC driver.
  • Note that: The JDBC datasource is powered by HiKariCP, for datasource properties, please specify them with the prefix: kyuubi.metadata.store.jdbc.datasource. For example, kyuubi.metadata.store.jdbc.datasource.connectionTimeout=10000. | string | 1.6.0 | -| kyuubi.metadata.store.jdbc.driver | <undefined> | JDBC driver class name for server jdbc metadata store. | string | 1.6.0 | -| kyuubi.metadata.store.jdbc.password || The password for server JDBC metadata store. | string | 1.6.0 | -| kyuubi.metadata.store.jdbc.url | jdbc:derby:memory:kyuubi_state_store_db;create=true | The JDBC url for server JDBC metadata store. By default, it is a DERBY in-memory database url, and the state information is not shared across kyuubi instances. To enable high availability for multiple kyuubi instances, please specify a production JDBC url. | string | 1.6.0 | -| kyuubi.metadata.store.jdbc.user || The username for server JDBC metadata store. | string | 1.6.0 | +| Key | Default | Meaning | Type | Since | +|-------------------------------------------------|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.metadata.cleaner.enabled | true | Whether to clean the metadata periodically. If it is enabled, Kyuubi will clean the metadata that is in the terminate state with max age limitation. | boolean | 1.6.0 | +| kyuubi.metadata.cleaner.interval | PT30M | The interval to check and clean expired metadata. | duration | 1.6.0 | +| kyuubi.metadata.max.age | PT72H | The maximum age of metadata, the metadata exceeding the age will be cleaned. | duration | 1.6.0 | +| kyuubi.metadata.recovery.threads | 10 | The number of threads for recovery from the metadata store when the Kyuubi server restarts. | int | 1.6.0 | +| kyuubi.metadata.request.async.retry.enabled | true | Whether to retry in async when metadata request failed. When true, return success response immediately even the metadata request failed, and schedule it in background until success, to tolerate long-time metadata store outages w/o blocking the submission request. | boolean | 1.7.0 | +| kyuubi.metadata.request.async.retry.queue.size | 65536 | The maximum queue size for buffering metadata requests in memory when the external metadata storage is down. Requests will be dropped if the queue exceeds. Only take affect when kyuubi.metadata.request.async.retry.enabled is `true`. | int | 1.6.0 | +| kyuubi.metadata.request.async.retry.threads | 10 | Number of threads in the metadata request async retry manager thread pool. Only take affect when kyuubi.metadata.request.async.retry.enabled is `true`. | int | 1.6.0 | +| kyuubi.metadata.request.retry.interval | PT5S | The interval to check and trigger the metadata request retry tasks. | duration | 1.6.0 | +| kyuubi.metadata.store.class | org.apache.kyuubi.server.metadata.jdbc.JDBCMetadataStore | Fully qualified class name for server metadata store. 
| string | 1.6.0 | +| kyuubi.metadata.store.jdbc.database.schema.init | true | Whether to init the JDBC metadata store database schema. | boolean | 1.6.0 | +| kyuubi.metadata.store.jdbc.database.type | SQLITE | The database type for server jdbc metadata store.
    • (Deprecated) DERBY: Apache Derby, JDBC driver `org.apache.derby.jdbc.AutoloadedDriver`.
    • SQLITE: SQLite3, JDBC driver `org.sqlite.JDBC`.
    • MYSQL: MySQL, JDBC driver `com.mysql.jdbc.Driver`.
    • CUSTOM: User-defined database type; the corresponding JDBC driver must be specified.
    • Note that: The JDBC datasource is powered by HiKariCP, for datasource properties, please specify them with the prefix: kyuubi.metadata.store.jdbc.datasource. For example, kyuubi.metadata.store.jdbc.datasource.connectionTimeout=10000. | string | 1.6.0 | +| kyuubi.metadata.store.jdbc.driver | <undefined> | JDBC driver class name for server jdbc metadata store. | string | 1.6.0 | +| kyuubi.metadata.store.jdbc.password || The password for server JDBC metadata store. | string | 1.6.0 | +| kyuubi.metadata.store.jdbc.url | jdbc:sqlite:kyuubi_state_store.db | The JDBC url for server JDBC metadata store. By default, it is a SQLite database url, and the state information is not shared across kyuubi instances. To enable high availability for multiple kyuubi instances, please specify a production JDBC url. | string | 1.6.0 | +| kyuubi.metadata.store.jdbc.user || The username for server JDBC metadata store. | string | 1.6.0 | ### Metrics @@ -324,34 +360,35 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.metrics.json.location | metrics | Where the JSON metrics file located | string | 1.2.0 | | kyuubi.metrics.prometheus.path | /metrics | URI context path of prometheus metrics HTTP server | string | 1.2.0 | | kyuubi.metrics.prometheus.port | 10019 | Prometheus metrics HTTP server port | int | 1.2.0 | -| kyuubi.metrics.reporters | JSON | A comma-separated list for all metrics reporters
      • CONSOLE - ConsoleReporter which outputs measurements to CONSOLE periodically.
      • JMX - JmxReporter which listens for new metrics and exposes them as MBeans.
      • JSON - JsonReporter which outputs measurements to json file periodically.
      • PROMETHEUS - PrometheusReporter which exposes metrics in Prometheus format.
      • SLF4J - Slf4jReporter which outputs measurements to system log periodically.
      | seq | 1.2.0 | +| kyuubi.metrics.reporters | JSON | A comma-separated list for all metrics reporters
      • CONSOLE - ConsoleReporter which outputs measurements to CONSOLE periodically.
      • JMX - JmxReporter which listens for new metrics and exposes them as MBeans.
      • JSON - JsonReporter which outputs measurements to a JSON file periodically.
      • PROMETHEUS - PrometheusReporter which exposes metrics in Prometheus format.
      • SLF4J - Slf4jReporter which outputs measurements to system log periodically.
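
As a rough illustration only (the keys below all appear in the metrics table here; the values are arbitrary examples, not recommendations), enabling two reporters in `$KYUUBI_HOME/conf/kyuubi-defaults.conf` might look like this:

```properties
# Hypothetical kyuubi-defaults.conf fragment: enable the Prometheus and JSON reporters together
kyuubi.metrics.reporters=PROMETHEUS,JSON
# Prometheus metrics would then be served at http://<server-host>:10019/metrics
kyuubi.metrics.prometheus.port=10019
kyuubi.metrics.prometheus.path=/metrics
# Directory where the JSON reporter writes its metrics file
kyuubi.metrics.json.location=metrics
```
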
      | set | 1.2.0 | | kyuubi.metrics.slf4j.interval | PT5S | How often should report metrics to SLF4J logger | duration | 1.2.0 | ### Operation -| Key | Default | Meaning | Type | Since | -|-------------------------------------------------|---------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.operation.idle.timeout | PT3H | Operation will be closed when it's not accessed for this duration of time | duration | 1.0.0 | -| kyuubi.operation.interrupt.on.cancel | true | When true, all running tasks will be interrupted if one cancels a query. When false, all running tasks will remain until finished. | boolean | 1.2.0 | -| kyuubi.operation.language | SQL | Choose a programing language for the following inputs
      • SQL: (Default) Run all following statements as SQL queries.
      • SCALA: Run all following input as scala codes
      • PYTHON: (Experimental) Run all following input as Python codes with Spark engine
      | string | 1.5.0 | -| kyuubi.operation.log.dir.root | server_operation_logs | Root directory for query operation log at server-side. | string | 1.4.0 | -| kyuubi.operation.plan.only.excludes | ResetCommand,SetCommand,SetNamespaceCommand,UseStatement,SetCatalogAndNamespace | Comma-separated list of query plan names, in the form of simple class names, i.e, for `SET abc=xyz`, the value will be `SetCommand`. For those auxiliary plans, such as `switch databases`, `set properties`, or `create temporary view` etc., which are used for setup evaluating environments for analyzing actual queries, we can use this config to exclude them and let them take effect. See also kyuubi.operation.plan.only.mode. | seq | 1.5.0 | -| kyuubi.operation.plan.only.mode | none | Configures the statement performed mode, The value can be 'parse', 'analyze', 'optimize', 'optimize_with_stats', 'physical', 'execution', or 'none', when it is 'none', indicate to the statement will be fully executed, otherwise only way without executing the query. different engines currently support different modes, the Spark engine supports all modes, and the Flink engine supports 'parse', 'physical', and 'execution', other engines do not support planOnly currently. | string | 1.4.0 | -| kyuubi.operation.plan.only.output.style | plain | Configures the planOnly output style. The value can be 'plain' or 'json', and the default value is 'plain'. This configuration supports only the output styles of the Spark engine | string | 1.7.0 | -| kyuubi.operation.progress.enabled | false | Whether to enable the operation progress. When true, the operation progress will be returned in `GetOperationStatus`. | boolean | 1.6.0 | -| kyuubi.operation.query.timeout | <undefined> | Timeout for query executions at server-side, take effect with client-side timeout(`java.sql.Statement.setQueryTimeout`) together, a running query will be cancelled automatically if timeout. It's off by default, which means only client-side take full control of whether the query should timeout or not. If set, client-side timeout is capped at this point. To cancel the queries right away without waiting for task to finish, consider enabling kyuubi.operation.interrupt.on.cancel together. | duration | 1.2.0 | -| kyuubi.operation.result.arrow.timestampAsString | false | When true, arrow-based rowsets will convert columns of type timestamp to strings for transmission. | boolean | 1.7.0 | -| kyuubi.operation.result.format | thrift | Specify the result format, available configs are:
      • THRIFT: the result will convert to TRow at the engine driver side.
      • ARROW: the result will be encoded as Arrow at the executor side before collecting by the driver, and deserialized at the client side. note that it only takes effect for kyuubi-hive-jdbc clients now.
      | string | 1.7.0 | -| kyuubi.operation.result.max.rows | 0 | Max rows of Spark query results. Rows exceeding the limit would be ignored. By setting this value to 0 to disable the max rows limit. | int | 1.6.0 | -| kyuubi.operation.scheduler.pool | <undefined> | The scheduler pool of job. Note that, this config should be used after changing Spark config spark.scheduler.mode=FAIR. | string | 1.1.1 | -| kyuubi.operation.spark.listener.enabled | true | When set to true, Spark engine registers an SQLOperationListener before executing the statement, logging a few summary statistics when each stage completes. | boolean | 1.6.0 | -| kyuubi.operation.status.polling.timeout | PT5S | Timeout(ms) for long polling asynchronous running sql query's status | duration | 1.0.0 | +| Key | Default | Meaning | Type | Since | +|--------------------------------------------------|---------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| kyuubi.operation.getTables.ignoreTableProperties | false | Speed up the `GetTables` operation by returning table identities only. | boolean | 1.8.0 | +| kyuubi.operation.idle.timeout | PT3H | Operation will be closed when it's not accessed for this duration of time | duration | 1.0.0 | +| kyuubi.operation.interrupt.on.cancel | true | When true, all running tasks will be interrupted if one cancels a query. When false, all running tasks will remain until finished. | boolean | 1.2.0 | +| kyuubi.operation.language | SQL | Choose a programing language for the following inputs
      • SQL: (Default) Run all following statements as SQL queries.
      • SCALA: Run all following input as Scala code
      • PYTHON: (Experimental) Run all following input as Python code with the Spark engine
      | string | 1.5.0 | +| kyuubi.operation.log.dir.root | server_operation_logs | Root directory for query operation log at server-side. | string | 1.4.0 | +| kyuubi.operation.plan.only.excludes | SetCatalogAndNamespace,UseStatement,SetNamespaceCommand,SetCommand,ResetCommand | Comma-separated list of query plan names, in the form of simple class names, i.e, for `SET abc=xyz`, the value will be `SetCommand`. For those auxiliary plans, such as `switch databases`, `set properties`, or `create temporary view` etc., which are used for setup evaluating environments for analyzing actual queries, we can use this config to exclude them and let them take effect. See also kyuubi.operation.plan.only.mode. | set | 1.5.0 | +| kyuubi.operation.plan.only.mode | none | Configures the statement performed mode, The value can be 'parse', 'analyze', 'optimize', 'optimize_with_stats', 'physical', 'execution', 'lineage' or 'none', when it is 'none', indicate to the statement will be fully executed, otherwise only way without executing the query. different engines currently support different modes, the Spark engine supports all modes, and the Flink engine supports 'parse', 'physical', and 'execution', other engines do not support planOnly currently. | string | 1.4.0 | +| kyuubi.operation.plan.only.output.style | plain | Configures the planOnly output style. The value can be 'plain' or 'json', and the default value is 'plain'. This configuration supports only the output styles of the Spark engine | string | 1.7.0 | +| kyuubi.operation.progress.enabled | false | Whether to enable the operation progress. When true, the operation progress will be returned in `GetOperationStatus`. | boolean | 1.6.0 | +| kyuubi.operation.query.timeout | <undefined> | Timeout for query executions at server-side, take effect with client-side timeout(`java.sql.Statement.setQueryTimeout`) together, a running query will be cancelled automatically if timeout. It's off by default, which means only client-side take full control of whether the query should timeout or not. If set, client-side timeout is capped at this point. To cancel the queries right away without waiting for task to finish, consider enabling kyuubi.operation.interrupt.on.cancel together. | duration | 1.2.0 | +| kyuubi.operation.result.arrow.timestampAsString | false | When true, arrow-based rowsets will convert columns of type timestamp to strings for transmission. | boolean | 1.7.0 | +| kyuubi.operation.result.format | thrift | Specify the result format, available configs are:
      • THRIFT: the result will be converted to TRow at the engine driver side.
      • ARROW: the result will be encoded as Arrow at the executor side before being collected by the driver, and deserialized at the client side. Note that it currently only takes effect for kyuubi-hive-jdbc clients.
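
As a sketch only (the keys come from the Operation table above; the values are illustrative and should be tuned per deployment), a `kyuubi-defaults.conf` fragment adjusting these operation settings might look like this:

```properties
# Hypothetical kyuubi-defaults.conf fragment; values are examples only
# Encode result sets as Arrow (currently only honored by kyuubi-hive-jdbc clients)
kyuubi.operation.result.format=arrow
# Cancel queries on the server side after 30 minutes
kyuubi.operation.query.timeout=PT30M
# Cap Spark result sets at 100000 rows (0 keeps the limit disabled)
kyuubi.operation.result.max.rows=100000
```
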
      | string | 1.7.0 | +| kyuubi.operation.result.max.rows | 0 | Max rows of Spark query results. Rows exceeding the limit would be ignored. By setting this value to 0 to disable the max rows limit. | int | 1.6.0 | +| kyuubi.operation.scheduler.pool | <undefined> | The scheduler pool of job. Note that, this config should be used after changing Spark config spark.scheduler.mode=FAIR. | string | 1.1.1 | +| kyuubi.operation.spark.listener.enabled | true | When set to true, Spark engine registers an SQLOperationListener before executing the statement, logging a few summary statistics when each stage completes. | boolean | 1.6.0 | +| kyuubi.operation.status.polling.timeout | PT5S | Timeout(ms) for long polling asynchronous running sql query's status | duration | 1.0.0 | ### Server | Key | Default | Meaning | Type | Since | |----------------------------------------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| -| kyuubi.server.administrators || Comma-separated list of Kyuubi service administrators. We use this config to grant admin permission to any service accounts. | seq | 1.8.0 | +| kyuubi.server.administrators || Comma-separated list of Kyuubi service administrators. We use this config to grant admin permission to any service accounts. | set | 1.8.0 | | kyuubi.server.info.provider | ENGINE | The server information provider name, some clients may rely on this information to check the server compatibilities and functionalities.
    • SERVER: Return Kyuubi server information.
    • ENGINE: Return Kyuubi engine information.
    • | string | 1.6.1 | | kyuubi.server.limit.batch.connections.per.ipaddress | <undefined> | Maximum kyuubi server batch connections per ipaddress. Any user exceeding this limit will not be allowed to connect. | int | 1.7.0 | | kyuubi.server.limit.batch.connections.per.user | <undefined> | Maximum kyuubi server batch connections per user. Any user exceeding this limit will not be allowed to connect. | int | 1.7.0 | @@ -360,7 +397,8 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.server.limit.connections.per.ipaddress | <undefined> | Maximum kyuubi server connections per ipaddress. Any user exceeding this limit will not be allowed to connect. | int | 1.6.0 | | kyuubi.server.limit.connections.per.user | <undefined> | Maximum kyuubi server connections per user. Any user exceeding this limit will not be allowed to connect. | int | 1.6.0 | | kyuubi.server.limit.connections.per.user.ipaddress | <undefined> | Maximum kyuubi server connections per user:ipaddress combination. Any user-ipaddress exceeding this limit will not be allowed to connect. | int | 1.6.0 | -| kyuubi.server.limit.connections.user.unlimited.list || The maximum connections of the user in the white list will not be limited. | seq | 1.7.0 | +| kyuubi.server.limit.connections.user.deny.list || The user in the deny list will be denied to connect to kyuubi server, if the user has configured both user.unlimited.list and user.deny.list, the priority of the latter is higher. | set | 1.8.0 | +| kyuubi.server.limit.connections.user.unlimited.list || The maximum connections of the user in the white list will not be limited. | set | 1.7.0 | | kyuubi.server.name | <undefined> | The name of Kyuubi Server. | string | 1.5.0 | | kyuubi.server.periodicGC.interval | PT30M | How often to trigger a garbage collection. | duration | 1.7.0 | | kyuubi.server.redaction.regex | <undefined> | Regex to decide which Kyuubi contain sensitive information. When this regex matches a property key or value, the value is redacted from the various logs. || 1.6.0 | @@ -370,15 +408,18 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | Key | Default | Meaning | Type | Since | |------------------------------------------------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| | kyuubi.session.check.interval | PT5M | The check interval for session timeout. | duration | 1.0.0 | +| kyuubi.session.close.on.disconnect | true | Session will be closed when client disconnects from kyuubi gateway. Set this to false to have session outlive its parent connection. | boolean | 1.8.0 | | kyuubi.session.conf.advisor | <undefined> | A config advisor plugin for Kyuubi Server. This plugin can provide some custom configs for different users or session configs and overwrite the session configs before opening a new session. This config value should be a subclass of `org.apache.kyuubi.plugin.SessionConfAdvisor` which has a zero-arg constructor. 
| string | 1.5.0 | | kyuubi.session.conf.file.reload.interval | PT10M | When `FileSessionConfAdvisor` is used, this configuration defines the expired time of `$KYUUBI_CONF_DIR/kyuubi-session-.conf` in the cache. After exceeding this value, the file will be reloaded. | duration | 1.7.0 | -| kyuubi.session.conf.ignore.list || A comma-separated list of ignored keys. If the client connection contains any of them, the key and the corresponding value will be removed silently during engine bootstrap and connection setup. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering but will not forbid users to set dynamic configurations via SET syntax. | seq | 1.2.0 | +| kyuubi.session.conf.ignore.list || A comma-separated list of ignored keys. If the client connection contains any of them, the key and the corresponding value will be removed silently during engine bootstrap and connection setup. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering but will not forbid users to set dynamic configurations via SET syntax. | set | 1.2.0 | | kyuubi.session.conf.profile | <undefined> | Specify a profile to load session-level configurations from `$KYUUBI_CONF_DIR/kyuubi-session-.conf`. This configuration will be ignored if the file does not exist. This configuration only takes effect when `kyuubi.session.conf.advisor` is set as `org.apache.kyuubi.session.FileSessionConfAdvisor`. | string | 1.7.0 | -| kyuubi.session.conf.restrict.list || A comma-separated list of restricted keys. If the client connection contains any of them, the connection will be rejected explicitly during engine bootstrap and connection setup. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering but will not forbid users to set dynamic configurations via SET syntax. | seq | 1.2.0 | +| kyuubi.session.conf.restrict.list || A comma-separated list of restricted keys. If the client connection contains any of them, the connection will be rejected explicitly during engine bootstrap and connection setup. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering but will not forbid users to set dynamic configurations via SET syntax. | set | 1.2.0 | +| kyuubi.session.engine.alive.max.failures | 3 | The maximum number of failures allowed for the engine. | int | 1.8.0 | | kyuubi.session.engine.alive.probe.enabled | false | Whether to enable the engine alive probe, it true, we will create a companion thrift client that keeps sending simple requests to check whether the engine is alive. | boolean | 1.6.0 | | kyuubi.session.engine.alive.probe.interval | PT10S | The interval for engine alive probe. | duration | 1.6.0 | | kyuubi.session.engine.alive.timeout | PT2M | The timeout for engine alive. If there is no alive probe success in the last timeout window, the engine will be marked as no-alive. | duration | 1.6.0 | | kyuubi.session.engine.check.interval | PT1M | The check interval for engine timeout | duration | 1.0.0 | +| kyuubi.session.engine.flink.fetch.timeout | <undefined> | Result fetch timeout for Flink engine. If the timeout is reached, the result fetch would be stopped and the current fetched would be returned. If no data are fetched, a TimeoutException would be thrown. 
| duration | 1.8.0 | | kyuubi.session.engine.flink.main.resource | <undefined> | The package used to create Flink SQL engine remote job. If it is undefined, Kyuubi will use the default | string | 1.4.0 | | kyuubi.session.engine.flink.max.rows | 1000000 | Max rows of Flink query results. For batch queries, rows exceeding the limit would be ignored. For streaming queries, the query would be canceled if the limit is reached. | int | 1.5.0 | | kyuubi.session.engine.hive.main.resource | <undefined> | The package used to create Hive engine remote job. If it is undefined, Kyuubi will use the default | string | 1.6.0 | @@ -391,10 +432,12 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.session.engine.open.retry.wait | PT10S | How long to wait before retrying to open the engine after failure. | duration | 1.7.0 | | kyuubi.session.engine.share.level | USER | (deprecated) - Using kyuubi.engine.share.level instead | string | 1.0.0 | | kyuubi.session.engine.spark.main.resource | <undefined> | The package used to create Spark SQL engine remote application. If it is undefined, Kyuubi will use the default | string | 1.0.0 | +| kyuubi.session.engine.spark.max.initial.wait | PT1M | Max wait time for the initial connection to Spark engine. The engine will self-terminate no new incoming connection is established within this time. This setting only applies at the CONNECTION share level. 0 or negative means not to self-terminate. | duration | 1.8.0 | | kyuubi.session.engine.spark.max.lifetime | PT0S | Max lifetime for Spark engine, the engine will self-terminate when it reaches the end of life. 0 or negative means not to self-terminate. | duration | 1.6.0 | | kyuubi.session.engine.spark.progress.timeFormat | yyyy-MM-dd HH:mm:ss.SSS | The time format of the progress bar | string | 1.6.0 | | kyuubi.session.engine.spark.progress.update.interval | PT1S | Update period of progress bar. | duration | 1.6.0 | | kyuubi.session.engine.spark.showProgress | false | When true, show the progress bar in the Spark's engine log. | boolean | 1.6.0 | +| kyuubi.session.engine.startup.destroy.timeout | PT5S | Engine startup process destroy wait time, if the process does not stop after this time, force destroy instead. This configuration only takes effect when `kyuubi.session.engine.startup.waitCompletion=false`. | duration | 1.8.0 | | kyuubi.session.engine.startup.error.max.size | 8192 | During engine bootstrapping, if anderror occurs, using this config to limit the length of error message(characters). | int | 1.1.0 | | kyuubi.session.engine.startup.maxLogLines | 10 | The maximum number of engine log lines when errors occur during the engine startup phase. Note that this config effects on client-side to help track engine startup issues. | int | 1.4.0 | | kyuubi.session.engine.startup.waitCompletion | true | Whether to wait for completion after the engine starts. If false, the startup process will be destroyed after the engine is started. Note that only use it when the driver is not running locally, such as in yarn-cluster mode; Otherwise, the engine will be killed. | boolean | 1.5.0 | @@ -405,7 +448,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.session.engine.trino.showProgress.debug | false | When true, show the progress debug info in the Trino engine log. | boolean | 1.6.0 | | kyuubi.session.group.provider | hadoop | A group provider plugin for Kyuubi Server. 
This plugin can provide primary group and groups information for different users or session configs. This config value should be a subclass of `org.apache.kyuubi.plugin.GroupProvider` which has a zero-arg constructor. Kyuubi provides the following built-in implementations:
    • hadoop: delegate the user group mapping to hadoop UserGroupInformation.
    • | string | 1.7.0 | | kyuubi.session.idle.timeout | PT6H | session idle timeout, it will be closed when it's not accessed for this duration | duration | 1.2.0 | -| kyuubi.session.local.dir.allow.list || The local dir list that are allowed to access by the kyuubi session application. End-users might set some parameters such as `spark.files` and it will upload some local files when launching the kyuubi engine, if the local dir allow list is defined, kyuubi will check whether the path to upload is in the allow list. Note that, if it is empty, there is no limitation for that. And please use absolute paths. | seq | 1.6.0 | +| kyuubi.session.local.dir.allow.list || The local dir list that are allowed to access by the kyuubi session application. End-users might set some parameters such as `spark.files` and it will upload some local files when launching the kyuubi engine, if the local dir allow list is defined, kyuubi will check whether the path to upload is in the allow list. Note that, if it is empty, there is no limitation for that. And please use absolute paths. | set | 1.6.0 | | kyuubi.session.name | <undefined> | A human readable name of the session and we use empty string by default. This name will be recorded in the event. Note that, we only apply this value from session conf. | string | 1.4.0 | | kyuubi.session.timeout | PT6H | (deprecated)session timeout, it will be closed when it's not accessed for this duration | duration | 1.0.0 | | kyuubi.session.user.sign.enabled | false | Whether to verify the integrity of session user name on the engine side, e.g. Authz plugin in Spark. | boolean | 1.7.0 | @@ -417,20 +460,28 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co | kyuubi.spnego.keytab | <undefined> | Keytab file for SPNego principal | string | 1.6.0 | | kyuubi.spnego.principal | <undefined> | SPNego service principal, typical value would look like HTTP/_HOST@EXAMPLE.COM. SPNego service principal would be used when restful Kerberos security is enabled. This needs to be set only if SPNEGO is to be used in authentication. | string | 1.6.0 | +### Yarn + +| Key | Default | Meaning | Type | Since | +|---------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-------| +| kyuubi.yarn.user.admin | yarn | When kyuubi.yarn.user.strategy is set to ADMIN, use this admin user to construct YARN client for application management, e.g. kill application. | string | 1.8.0 | +| kyuubi.yarn.user.strategy | NONE | Determine which user to use to construct YARN client for application management, e.g. kill application. Options:
      • NONE: use Kyuubi server user.
      • ADMIN: use admin user configured in `kyuubi.yarn.user.admin`.
      • OWNER: use the session user, typically the application owner.
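
As an illustrative sketch (both keys appear in the Yarn table here; whether the ADMIN strategy is appropriate depends on your YARN setup), delegating application management to a dedicated admin account could look like this:

```properties
# Hypothetical kyuubi-defaults.conf fragment
# Let Kyuubi manage YARN applications (e.g. kill) as a dedicated admin user
kyuubi.yarn.user.strategy=ADMIN
kyuubi.yarn.user.admin=yarn
```
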
      | string | 1.8.0 | + ### Zookeeper -| Key | Default | Meaning | Type | Since | -|--------------------------------------------------|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-------| -| kyuubi.zookeeper.embedded.client.port | 2181 | clientPort for the embedded ZooKeeper server to listen for client connections, a client here could be Kyuubi server, engine, and JDBC client | int | 1.2.0 | -| kyuubi.zookeeper.embedded.client.port.address | <undefined> | clientPortAddress for the embedded ZooKeeper server to | string | 1.2.0 | -| kyuubi.zookeeper.embedded.data.dir | embedded_zookeeper | dataDir for the embedded zookeeper server where stores the in-memory database snapshots and, unless specified otherwise, the transaction log of updates to the database. | string | 1.2.0 | -| kyuubi.zookeeper.embedded.data.log.dir | embedded_zookeeper | dataLogDir for the embedded ZooKeeper server where writes the transaction log . | string | 1.2.0 | -| kyuubi.zookeeper.embedded.directory | embedded_zookeeper | The temporary directory for the embedded ZooKeeper server | string | 1.0.0 | -| kyuubi.zookeeper.embedded.max.client.connections | 120 | maxClientCnxns for the embedded ZooKeeper server to limit the number of concurrent connections of a single client identified by IP address | int | 1.2.0 | -| kyuubi.zookeeper.embedded.max.session.timeout | 60000 | maxSessionTimeout in milliseconds for the embedded ZooKeeper server will allow the client to negotiate. Defaults to 20 times the tickTime | int | 1.2.0 | -| kyuubi.zookeeper.embedded.min.session.timeout | 6000 | minSessionTimeout in milliseconds for the embedded ZooKeeper server will allow the client to negotiate. Defaults to 2 times the tickTime | int | 1.2.0 | -| kyuubi.zookeeper.embedded.port | 2181 | The port of the embedded ZooKeeper server | int | 1.0.0 | -| kyuubi.zookeeper.embedded.tick.time | 3000 | tickTime in milliseconds for the embedded ZooKeeper server | int | 1.2.0 | +| Key | Default | Meaning | Type | Since | +|--------------------------------------------------|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-------| +| kyuubi.zookeeper.embedded.client.port | 2181 | clientPort for the embedded ZooKeeper server to listen for client connections, a client here could be Kyuubi server, engine, and JDBC client | int | 1.2.0 | +| kyuubi.zookeeper.embedded.client.port.address | <undefined> | clientPortAddress for the embedded ZooKeeper server to | string | 1.2.0 | +| kyuubi.zookeeper.embedded.client.use.hostname | false | When true, embedded Zookeeper prefer to bind hostname, otherwise, ip address. | boolean | 1.7.2 | +| kyuubi.zookeeper.embedded.data.dir | embedded_zookeeper | dataDir for the embedded zookeeper server where stores the in-memory database snapshots and, unless specified otherwise, the transaction log of updates to the database. | string | 1.2.0 | +| kyuubi.zookeeper.embedded.data.log.dir | embedded_zookeeper | dataLogDir for the embedded ZooKeeper server where writes the transaction log . 
| string | 1.2.0 | +| kyuubi.zookeeper.embedded.directory | embedded_zookeeper | The temporary directory for the embedded ZooKeeper server | string | 1.0.0 | +| kyuubi.zookeeper.embedded.max.client.connections | 120 | maxClientCnxns for the embedded ZooKeeper server to limit the number of concurrent connections of a single client identified by IP address | int | 1.2.0 | +| kyuubi.zookeeper.embedded.max.session.timeout | 60000 | maxSessionTimeout in milliseconds for the embedded ZooKeeper server will allow the client to negotiate. Defaults to 20 times the tickTime | int | 1.2.0 | +| kyuubi.zookeeper.embedded.min.session.timeout | 6000 | minSessionTimeout in milliseconds for the embedded ZooKeeper server will allow the client to negotiate. Defaults to 2 times the tickTime | int | 1.2.0 | +| kyuubi.zookeeper.embedded.port | 2181 | The port of the embedded ZooKeeper server | int | 1.0.0 | +| kyuubi.zookeeper.embedded.tick.time | 3000 | tickTime in milliseconds for the embedded ZooKeeper server | int | 1.2.0 | ## Spark Configurations diff --git a/docs/connector/flink/index.rst b/docs/connector/flink/index.rst index c9d91091f71..e7d40fd43b9 100644 --- a/docs/connector/flink/index.rst +++ b/docs/connector/flink/index.rst @@ -19,6 +19,6 @@ Connectors For Flink SQL Query Engine .. toctree:: :maxdepth: 2 - flink_table_store + paimon hudi iceberg diff --git a/docs/connector/flink/flink_table_store.rst b/docs/connector/flink/paimon.rst similarity index 51% rename from docs/connector/flink/flink_table_store.rst rename to docs/connector/flink/paimon.rst index 14c576bf3a5..b67101488e8 100644 --- a/docs/connector/flink/flink_table_store.rst +++ b/docs/connector/flink/paimon.rst @@ -13,57 +13,56 @@ See the License for the specific language governing permissions and limitations under the License. -`Flink Table Store`_ -========== +`Apache Paimon (Incubating)`_ +============================= -Flink Table Store is a unified storage to build dynamic tables for both streaming and batch processing in Flink, -supporting high-speed data ingestion and timely data query. +Apache Paimon (Incubating) is a streaming data lake platform that supports high-speed data ingestion, change data tracking, and efficient real-time analytics. .. tip:: - This article assumes that you have mastered the basic knowledge and operation of `Flink Table Store`_. - For the knowledge about Flink Table Store not mentioned in this article, + This article assumes that you have mastered the basic knowledge and operation of `Apache Paimon (Incubating)`_. + For the knowledge not mentioned in this article, you can obtain it from its `Official Documentation`_. -By using kyuubi, we can run SQL queries towards Flink Table Store which is more -convenient, easy to understand, and easy to expand than directly using -flink to manipulate Flink Table Store. +By using kyuubi, we can run SQL queries towards Apache Paimon (Incubating) which is more +convenient, easy to understand, and easy to expand than directly using flink. -Flink Table Store Integration -------------------- +Apache Paimon (Incubating) Integration +-------------------------------------- -To enable the integration of kyuubi flink sql engine and Flink Table Store, you need to: +To enable the integration of kyuubi flink sql engine and Apache Paimon (Incubating), you need to: -- Referencing the Flink Table Store :ref:`dependencies` +- Referencing the Apache Paimon (Incubating) :ref:`dependencies` -.. _flink-table-store-deps: +.. 
_flink-paimon-deps: Dependencies ************ -The **classpath** of kyuubi flink sql engine with Flink Table Store supported consists of +The **classpath** of kyuubi flink sql engine with Apache Paimon (Incubating) supported consists of 1. kyuubi-flink-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions 2. a copy of flink distribution -3. flink-table-store-dist-.jar (example: flink-table-store-dist-0.2.jar), which can be found in the `Maven Central`_ +3. paimon-flink-.jar (example: paimon-flink-1.16-0.4-SNAPSHOT.jar), which can be found in the `Apache Paimon (Incubating) Supported Engines Flink`_ +4. flink-shaded-hadoop-2-uber-.jar, which code can be found in the `Pre-bundled Hadoop Jar`_ -In order to make the Flink Table Store packages visible for the runtime classpath of engines, we can use these methods: +In order to make the Apache Paimon (Incubating) packages visible for the runtime classpath of engines, you need to: -1. Put the Flink Table Store packages into ``$FLINK_HOME/lib`` directly +1. Put the Apache Paimon (Incubating) packages into ``$FLINK_HOME/lib`` directly 2. Setting the HADOOP_CLASSPATH environment variable or copy the `Pre-bundled Hadoop Jar`_ to flink/lib. .. warning:: - Please mind the compatibility of different Flink Table Store and Flink versions, which can be confirmed on the page of `Flink Table Store multi engine support`_. + Please mind the compatibility of different Apache Paimon (Incubating) and Flink versions, which can be confirmed on the page of `Apache Paimon (Incubating) multi engine support`_. -Flink Table Store Operations ------------------- +Apache Paimon (Incubating) Operations +------------------------------------- Taking ``CREATE CATALOG`` as a example, .. code-block:: sql CREATE CATALOG my_catalog WITH ( - 'type'='table-store', - 'warehouse'='hdfs://nn:8020/warehouse/path' -- or 'file:///tmp/foo/bar' + 'type'='paimon', + 'warehouse'='file:/tmp/paimon' ); USE CATALOG my_catalog; @@ -104,8 +103,8 @@ Taking ``Rescale Bucket`` as a example, INSERT OVERWRITE my_table PARTITION (dt = '2022-01-01'); -.. _Flink Table Store: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Official Documentation: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Maven Central: https://mvnrepository.com/artifact/org.apache.flink/flink-table-store-dist -.. _Pre-bundled Hadoop Jar: https://flink.apache.org/downloads.html -.. _Flink Table Store multi engine support: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/engines/overview/ +.. _Apache Paimon (Incubating): https://paimon.apache.org/ +.. _Official Documentation: https://paimon.apache.org/docs/master/ +.. _Apache Paimon (Incubating) Supported Engines Flink: https://paimon.apache.org/docs/master/engines/flink/#preparing-paimon-jar-file +.. _Pre-bundled Hadoop Jar: https://flink.apache.org/downloads/#additional-components +.. _Apache Paimon (Incubating) multi engine support: https://paimon.apache.org/docs/master/engines/overview/ diff --git a/docs/connector/hive/index.rst b/docs/connector/hive/index.rst index 961e1bc8bfb..d96f8b04188 100644 --- a/docs/connector/hive/index.rst +++ b/docs/connector/hive/index.rst @@ -19,5 +19,5 @@ Connectors for Hive SQL Query Engine .. 
toctree:: :maxdepth: 2 - flink_table_store + paimon iceberg diff --git a/docs/connector/hive/flink_table_store.rst b/docs/connector/hive/paimon.rst similarity index 52% rename from docs/connector/hive/flink_table_store.rst rename to docs/connector/hive/paimon.rst index 893262189f0..000d2d7e83c 100644 --- a/docs/connector/hive/flink_table_store.rst +++ b/docs/connector/hive/paimon.rst @@ -13,30 +13,29 @@ See the License for the specific language governing permissions and limitations under the License. -`Flink Table Store`_ +`Apache Paimon (Incubating)`_ ========== -Flink Table Store is a unified storage to build dynamic tables for both streaming and batch processing in Flink, -supporting high-speed data ingestion and timely data query. +Apache Paimon(incubating) is a streaming data lake platform that supports high-speed data ingestion, change data tracking and efficient real-time analytics. .. tip:: - This article assumes that you have mastered the basic knowledge and operation of `Flink Table Store`_. - For the knowledge about Flink Table Store not mentioned in this article, + This article assumes that you have mastered the basic knowledge and operation of `Apache Paimon (Incubating)`_. + For the knowledge about Apache Paimon (Incubating) not mentioned in this article, you can obtain it from its `Official Documentation`_. -By using Kyuubi, we can run SQL queries towards Flink Table Store which is more +By using Kyuubi, we can run SQL queries towards Apache Paimon (Incubating) which is more convenient, easy to understand, and easy to expand than directly using -Hive to manipulate Flink Table Store. +Hive to manipulate Apache Paimon (Incubating). -Flink Table Store Integration +Apache Paimon (Incubating) Integration ------------------- -To enable the integration of kyuubi flink sql engine and Flink Table Store, you need to: +To enable the integration of kyuubi hive sql engine and Apache Paimon (Incubating), you need to: -- Referencing the Flink Table Store :ref:`dependencies` -- Setting the environment variable :ref:`configurations` +- Referencing the Apache Paimon (Incubating) :ref:`dependencies` +- Setting the environment variable :ref:`configurations` -.. _hive-flink-table-store-deps: +.. _hive-paimon-deps: Dependencies ************ @@ -45,37 +44,37 @@ The **classpath** of kyuubi hive sql engine with Iceberg supported consists of 1. kyuubi-hive-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions 2. a copy of hive distribution -3. flink-table-store-hive-connector-_.jar (example: flink-table-store-hive-connector-0.4.0_3.1.jar), which can be found in the `Installation Table Store in Hive`_ +3. paimon-hive-connector--.jar (example: paimon-hive-connector-3.1-0.4-SNAPSHOT.jar), which can be found in the `Apache Paimon (Incubating) Supported Engines Hive`_ In order to make the Hive packages visible for the runtime classpath of engines, we can use one of these methods: -1. You can create an auxlib folder under the root directory of Hive, and copy flink-table-store-hive-connector-0.4.0_3.1.jar into auxlib. +1. You can create an auxlib folder under the root directory of Hive, and copy paimon-hive-connector-3.1-.jar into auxlib. 2. Execute ADD JAR statement in the Kyuubi to add dependencies to Hive’s auxiliary classpath. For example: .. code-block:: sql - ADD JAR /path/to/flink-table-store-hive-connector-0.4.0_3.1.jar; + ADD JAR /path/to/paimon-hive-connector-3.1-.jar; .. warning:: The second method is not recommended. 
If you’re using the MR execution engine and running a join statement, you may be faced with the exception ``org.apache.hive.com.esotericsoftware.kryo.kryoexception: unable to find class.`` .. warning:: - Please mind the compatibility of different Flink Table Store and Hive versions, which can be confirmed on the page of `Flink Table Store multi engine support`_. + Please mind the compatibility of different Apache Paimon (Incubating) and Hive versions, which can be confirmed on the page of `Apache Paimon (Incubating) multi engine support`_. -.. _hive-flink-table-store-conf: +.. _hive-paimon-conf: Configurations ************** If you are using HDFS, make sure that the environment variable HADOOP_HOME or HADOOP_CONF_DIR is set. -Flink Table Store Operations +Apache Paimon (Incubating) Operations ------------------ -Flink Table Store only supports only reading table store tables through Hive. -A common scenario is to write data with Flink and read data with Hive. -You can follow this document `Flink Table Store Quick Start`_ to write data to a table store table +Apache Paimon (Incubating) only supports only reading table store tables through Hive. +A common scenario is to write data with Spark or Flink and read data with Hive. +You can follow this document `Apache Paimon (Incubating) Quick Start with Paimon Hive Catalog`_ to write data to a table which can also be accessed directly from Hive. and then use Kyuubi Hive SQL engine to query the table with the following SQL ``SELECT`` statement. Taking ``Query Data`` as an example, @@ -89,13 +88,13 @@ Taking ``Query External Table`` as an example, .. code-block:: sql CREATE EXTERNAL TABLE external_test_table - STORED BY 'org.apache.flink.table.store.hive.TableStoreHiveStorageHandler' + STORED BY 'org.apache.paimon.hive.PaimonStorageHandler' LOCATION '/path/to/table/store/warehouse/default.db/test_table'; SELECT a, b FROM test_table ORDER BY a; -.. _Flink Table Store: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Flink Table Store Quick Start: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/try-table-store/quick-start/ -.. _Official Documentation: https://nightlies.apache.org/flink/flink-table-store-docs-release-0.4/docs/engines/hive/ -.. _Installation Table Store in Hive: https://nightlies.apache.org/flink/flink-table-store-docs-release-0.4/docs/engines/hive/#installation -.. _Flink Table Store multi engine support: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/engines/overview/ +.. _Apache Paimon (Incubating): https://paimon.apache.org/ +.. _Official Documentation: https://paimon.apache.org/docs/master/ +.. _Apache Paimon (Incubating) Quick Start with Paimon Hive Catalog: https://paimon.apache.org/docs/master/engines/hive/#quick-start-with-paimon-hive-catalog +.. _Apache Paimon (Incubating) Supported Engines Hive: https://paimon.apache.org/docs/master/engines/hive/ +.. _Apache Paimon (Incubating) multi engine support: https://paimon.apache.org/docs/master/engines/overview/ diff --git a/docs/connector/spark/flink_table_store.rst b/docs/connector/spark/flink_table_store.rst deleted file mode 100644 index ee4c2b352c2..00000000000 --- a/docs/connector/spark/flink_table_store.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. See the NOTICE file distributed with - this work for additional information regarding copyright ownership. 
- The ASF licenses this file to You under the Apache License, Version 2.0 - (the "License"); you may not use this file except in compliance with - the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -`Flink Table Store`_ -========== - -Flink Table Store is a unified storage to build dynamic tables for both streaming and batch processing in Flink, -supporting high-speed data ingestion and timely data query. - -.. tip:: - This article assumes that you have mastered the basic knowledge and operation of `Flink Table Store`_. - For the knowledge about Flink Table Store not mentioned in this article, - you can obtain it from its `Official Documentation`_. - -By using kyuubi, we can run SQL queries towards Flink Table Store which is more -convenient, easy to understand, and easy to expand than directly using -spark to manipulate Flink Table Store. - -Flink Table Store Integration -------------------- - -To enable the integration of kyuubi spark sql engine and Flink Table Store through -Apache Spark Datasource V2 and Catalog APIs, you need to: - -- Referencing the Flink Table Store :ref:`dependencies` -- Setting the spark extension and catalog :ref:`configurations` - -.. _spark-flink-table-store-deps: - -Dependencies -************ - -The **classpath** of kyuubi spark sql engine with Flink Table Store supported consists of - -1. kyuubi-spark-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions -2. a copy of spark distribution -3. flink-table-store-spark-.jar (example: flink-table-store-spark-0.2.jar), which can be found in the `Maven Central`_ - -In order to make the Flink Table Store packages visible for the runtime classpath of engines, we can use one of these methods: - -1. Put the Flink Table Store packages into ``$SPARK_HOME/jars`` directly -2. Set ``spark.jars=/path/to/flink-table-store-spark`` - -.. warning:: - Please mind the compatibility of different Flink Table Store and Spark versions, which can be confirmed on the page of `Flink Table Store multi engine support`_. - -.. _spark-flink-table-store-conf: - -Configurations -************** - -To activate functionality of Flink Table Store, we can set the following configurations: - -.. code-block:: properties - - spark.sql.catalog.tablestore=org.apache.flink.table.store.spark.SparkCatalog - spark.sql.catalog.tablestore.warehouse=file:/tmp/warehouse - -Flink Table Store Operations ------------------- - -Flink Table Store supports reading table store tables through Spark. -A common scenario is to write data with Flink and read data with Spark. -You can follow this document `Flink Table Store Quick Start`_ to write data to a table store table -and then use kyuubi spark sql engine to query the table with the following SQL ``SELECT`` statement. - - -.. code-block:: sql - - select * from table_store.default.word_count; - - - -.. _Flink Table Store: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Flink Table Store Quick Start: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/try-table-store/quick-start/ -.. _Official Documentation: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. 
_Maven Central: https://mvnrepository.com/artifact/org.apache.flink -.. _Flink Table Store multi engine support: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/engines/overview/ diff --git a/docs/connector/spark/index.rst b/docs/connector/spark/index.rst index 790e804f268..d1503443c63 100644 --- a/docs/connector/spark/index.rst +++ b/docs/connector/spark/index.rst @@ -23,7 +23,7 @@ By default, it provides accessibility to hive warehouses with various file forma supported, such as parquet, orc, json, etc. Also,it can easily integrate with other third-party libraries, such as Hudi, -Iceberg, Delta Lake, Kudu, Flink Table Store, HBase,Cassandra, etc. +Iceberg, Delta Lake, Kudu, Apache Paimon (Incubating), HBase,Cassandra, etc. We also provide sample data sources like TDC-DS, TPC-H for testing and benchmarking purpose. @@ -37,7 +37,7 @@ purpose. iceberg kudu hive - flink_table_store + paimon tidb tpcds tpch diff --git a/docs/connector/spark/paimon.rst b/docs/connector/spark/paimon.rst new file mode 100644 index 00000000000..14e74195503 --- /dev/null +++ b/docs/connector/spark/paimon.rst @@ -0,0 +1,110 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +`Apache Paimon (Incubating)`_ +========== + +Apache Paimon(incubating) is a streaming data lake platform that supports high-speed data ingestion, change data tracking and efficient real-time analytics. + +.. tip:: + This article assumes that you have mastered the basic knowledge and operation of `Apache Paimon (Incubating)`_. + For the knowledge about Apache Paimon (Incubating) not mentioned in this article, + you can obtain it from its `Official Documentation`_. + +By using kyuubi, we can run SQL queries towards Apache Paimon (Incubating) which is more +convenient, easy to understand, and easy to expand than directly using +spark to manipulate Apache Paimon (Incubating). + +Apache Paimon (Incubating) Integration +------------------- + +To enable the integration of kyuubi spark sql engine and Apache Paimon (Incubating), you need to set the following configurations: + +- Referencing the Apache Paimon (Incubating) :ref:`dependencies` +- Setting the spark extension and catalog :ref:`configurations` + +.. _spark-paimon-deps: + +Dependencies +************ + +The **classpath** of kyuubi spark sql engine with Apache Paimon (Incubating) consists of + +1. kyuubi-spark-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions +2. a copy of spark distribution +3. paimon-spark-.jar (example: paimon-spark-3.3-0.4-20230323.002035-5.jar), which can be found in the `Apache Paimon (Incubating) Supported Engines Spark3`_ + +In order to make the Apache Paimon (Incubating) packages visible for the runtime classpath of engines, we can use one of these methods: + +1. 
Put the Apache Paimon (Incubating) packages into ``$SPARK_HOME/jars`` directly +2. Set ``spark.jars=/path/to/paimon-spark-.jar`` + +.. warning:: + Please mind the compatibility of different Apache Paimon (Incubating) and Spark versions, which can be confirmed on the page of `Apache Paimon (Incubating) multi engine support`_. + +.. _spark-paimon-conf: + +Configurations +************** + +To activate functionality of Apache Paimon (Incubating), we can set the following configurations: + +.. code-block:: properties + + spark.sql.catalog.paimon=org.apache.paimon.spark.SparkCatalog + spark.sql.catalog.paimon.warehouse=file:/tmp/paimon + +Apache Paimon (Incubating) Operations +------------------ + + +Taking ``CREATE NAMESPACE`` as a example, + +.. code-block:: sql + + CREATE DATABASE paimon.default; + USE paimon.default; + +Taking ``CREATE TABLE`` as a example, + +.. code-block:: sql + + create table my_table ( + k int, + v string + ) tblproperties ( + 'primary-key' = 'k' + ); + +Taking ``SELECT`` as a example, + +.. code-block:: sql + + SELECT * FROM my_table; + + +Taking ``INSERT`` as a example, + +.. code-block:: sql + + INSERT INTO my_table VALUES (1, 'Hi Again'), (3, 'Test'); + + + + +.. _Apache Paimon (Incubating): https://paimon.apache.org/ +.. _Official Documentation: https://paimon.apache.org/docs/master/ +.. _Apache Paimon (Incubating) Supported Engines Spark3: https://paimon.apache.org/docs/master/engines/spark3/ +.. _Apache Paimon (Incubating) multi engine support: https://paimon.apache.org/docs/master/engines/overview/ diff --git a/docs/connector/trino/flink_table_store.rst b/docs/connector/trino/flink_table_store.rst deleted file mode 100644 index 8dd0c4061f8..00000000000 --- a/docs/connector/trino/flink_table_store.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. See the NOTICE file distributed with - this work for additional information regarding copyright ownership. - The ASF licenses this file to You under the Apache License, Version 2.0 - (the "License"); you may not use this file except in compliance with - the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -`Flink Table Store`_ -========== - -Flink Table Store is a unified storage to build dynamic tables for both streaming and batch processing in Flink, -supporting high-speed data ingestion and timely data query. - -.. tip:: - This article assumes that you have mastered the basic knowledge and operation of `Flink Table Store`_. - For the knowledge about Flink Table Store not mentioned in this article, - you can obtain it from its `Official Documentation`_. - -By using kyuubi, we can run SQL queries towards Flink Table Store which is more -convenient, easy to understand, and easy to expand than directly using -trino to manipulate Flink Table Store. - -Flink Table Store Integration -------------------- - -To enable the integration of kyuubi trino sql engine and Flink Table Store, you need to: - -- Referencing the Flink Table Store :ref:`dependencies` -- Setting the trino extension and catalog :ref:`configurations` - -.. 
_trino-flink-table-store-deps: - -Dependencies -************ - -The **classpath** of kyuubi trino sql engine with Flink Table Store supported consists of - -1. kyuubi-trino-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions -2. a copy of trino distribution -3. flink-table-store-trino-.jar (example: flink-table-store-trino-0.2.jar), which code can be found in the `Source Code`_ -4. flink-shaded-hadoop-2-uber-2.8.3-10.0.jar, which code can be found in the `Pre-bundled Hadoop 2.8.3`_ - -In order to make the Flink Table Store packages visible for the runtime classpath of engines, we can use these methods: - -1. Build the flink-table-store-trino-.jar by reference to `Flink Table Store Trino README`_ -2. Put the flink-table-store-trino-.jar and flink-shaded-hadoop-2-uber-2.8.3-10.0.jar packages into ``$TRINO_SERVER_HOME/plugin/tablestore`` directly - -.. warning:: - Please mind the compatibility of different Flink Table Store and Trino versions, which can be confirmed on the page of `Flink Table Store multi engine support`_. - -.. _trino-flink-table-store-conf: - -Configurations -************** - -To activate functionality of Flink Table Store, we can set the following configurations: - -Catalogs are registered by creating a catalog properties file in the $TRINO_SERVER_HOME/etc/catalog directory. -For example, create $TRINO_SERVER_HOME/etc/catalog/tablestore.properties with the following contents to mount the tablestore connector as the tablestore catalog: - -.. code-block:: properties - - connector.name=tablestore - warehouse=file:///tmp/warehouse - -Flink Table Store Operations ------------------- - -Flink Table Store supports reading table store tables through Trino. -A common scenario is to write data with Flink and read data with Trino. -You can follow this document `Flink Table Store Quick Start`_ to write data to a table store table -and then use kyuubi trino sql engine to query the table with the following SQL ``SELECT`` statement. - - -.. code-block:: sql - - SELECT * FROM tablestore.default.t1 - - -.. _Flink Table Store: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Flink Table Store Quick Start: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/try-table-store/quick-start/ -.. _Official Documentation: https://nightlies.apache.org/flink/flink-table-store-docs-stable/ -.. _Source Code: https://github.com/JingsongLi/flink-table-store-trino -.. _Flink Table Store multi engine support: https://nightlies.apache.org/flink/flink-table-store-docs-stable/docs/engines/overview/ -.. _Pre-bundled Hadoop 2.8.3: https://repo.maven.apache.org/maven2/org/apache/flink/flink-shaded-hadoop-2-uber/2.8.3-10.0/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar -.. _Flink Table Store Trino README: https://github.com/JingsongLi/flink-table-store-trino#readme diff --git a/docs/connector/trino/index.rst b/docs/connector/trino/index.rst index f5d651d45de..290966a5cf7 100644 --- a/docs/connector/trino/index.rst +++ b/docs/connector/trino/index.rst @@ -19,6 +19,6 @@ Connectors For Trino SQL Engine .. toctree:: :maxdepth: 2 - flink_table_store + paimon hudi iceberg \ No newline at end of file diff --git a/docs/connector/trino/paimon.rst b/docs/connector/trino/paimon.rst new file mode 100644 index 00000000000..5ac892234f8 --- /dev/null +++ b/docs/connector/trino/paimon.rst @@ -0,0 +1,92 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. 
See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +`Apache Paimon (Incubating)`_ +========== + +Apache Paimon(incubating) is a streaming data lake platform that supports high-speed data ingestion, change data tracking and efficient real-time analytics. + +.. tip:: + This article assumes that you have mastered the basic knowledge and operation of `Apache Paimon (Incubating)`_. + For the knowledge about Apache Paimon (Incubating) not mentioned in this article, + you can obtain it from its `Official Documentation`_. + +By using kyuubi, we can run SQL queries towards Apache Paimon (Incubating) which is more +convenient, easy to understand, and easy to expand than directly using +trino to manipulate Apache Paimon (Incubating). + +Apache Paimon (Incubating) Integration +------------------- + +To enable the integration of kyuubi trino sql engine and Apache Paimon (Incubating), you need to: + +- Referencing the Apache Paimon (Incubating) :ref:`dependencies` +- Setting the trino extension and catalog :ref:`configurations` + +.. _trino-paimon-deps: + +Dependencies +************ + +The **classpath** of kyuubi trino sql engine with Apache Paimon (Incubating) supported consists of + +1. kyuubi-trino-sql-engine-\ |release|\ _2.12.jar, the engine jar deployed with Kyuubi distributions +2. a copy of trino distribution +3. paimon-trino-.jar (example: paimon-trino-0.2.jar), which code can be found in the `Source Code`_ +4. flink-shaded-hadoop-2-uber-.jar, which code can be found in the `Pre-bundled Hadoop`_ + +In order to make the Apache Paimon (Incubating) packages visible for the runtime classpath of engines, you need to: + +1. Build the paimon-trino-.jar by reference to `Apache Paimon (Incubating) Trino README`_ +2. Put the paimon-trino-.jar and flink-shaded-hadoop-2-uber-.jar packages into ``$TRINO_SERVER_HOME/plugin/tablestore`` directly + +.. warning:: + Please mind the compatibility of different Apache Paimon (Incubating) and Trino versions, which can be confirmed on the page of `Apache Paimon (Incubating) multi engine support`_. + +.. _trino-paimon-conf: + +Configurations +************** + +To activate functionality of Apache Paimon (Incubating), we can set the following configurations: + +Catalogs are registered by creating a catalog properties file in the $TRINO_SERVER_HOME/etc/catalog directory. +For example, create $TRINO_SERVER_HOME/etc/catalog/tablestore.properties with the following contents to mount the tablestore connector as the tablestore catalog: + +.. code-block:: properties + + connector.name=tablestore + warehouse=file:///tmp/warehouse + +Apache Paimon (Incubating) Operations +------------------ + +Apache Paimon (Incubating) supports reading table store tables through Trino. +A common scenario is to write data with Spark or Flink and read data with Trino. 
+You can follow this document `Apache Paimon (Incubating) Engines Flink Quick Start`_ to write data to a table store table +and then use kyuubi trino sql engine to query the table with the following SQL ``SELECT`` statement. + + +.. code-block:: sql + + SELECT * FROM tablestore.default.t1 + +.. _Apache Paimon (Incubating): https://paimon.apache.org/ +.. _Apache Paimon (Incubating) multi engine support: https://paimon.apache.org/docs/master/engines/overview/ +.. _Apache Paimon (Incubating) Engines Flink Quick Start: https://paimon.apache.org/docs/master/engines/flink/#quick-start +.. _Official Documentation: https://paimon.apache.org/docs/master/ +.. _Source Code: https://github.com/JingsongLi/paimon-trino +.. _Pre-bundled Hadoop: https://flink.apache.org/downloads/#additional-components +.. _Apache Paimon (Incubating) Trino README: https://github.com/JingsongLi/paimon-trino#readme diff --git a/docs/develop_tools/building.md b/docs/contributing/code/building.md similarity index 93% rename from docs/develop_tools/building.md rename to docs/contributing/code/building.md index d4582dc8dae..8c5c5aeec60 100644 --- a/docs/develop_tools/building.md +++ b/docs/contributing/code/building.md @@ -15,9 +15,9 @@ - limitations under the License. --> -# Building Kyuubi +# Building From Source -## Building Kyuubi with Apache Maven +## Building With Maven **Kyuubi** is built based on [Apache Maven](https://maven.apache.org), @@ -33,7 +33,7 @@ If you want to test it manually, you can start Kyuubi directly from the Kyuubi p bin/kyuubi start ``` -## Building a Submodule Individually +## Building A Submodule Individually For instance, you can build the Kyuubi Common module using: @@ -49,7 +49,7 @@ For instance, you can build the Kyuubi Common module using: build/mvn clean package -pl kyuubi-common,kyuubi-ha -DskipTests ``` -## Skipping Some modules +## Skipping Some Modules For instance, you can build the Kyuubi modules without Kyuubi Codecov and Assembly modules using: @@ -57,7 +57,7 @@ For instance, you can build the Kyuubi modules without Kyuubi Codecov and Assemb mvn clean install -pl '!dev/kyuubi-codecov,!kyuubi-assembly' -DskipTests ``` -## Building Kyuubi against Different Apache Spark versions +## Building Kyuubi Against Different Apache Spark Versions Since v1.1.0, Kyuubi support building with different Spark profiles, @@ -67,7 +67,7 @@ Since v1.1.0, Kyuubi support building with different Spark profiles, | -Pspark-3.2 | No | 1.4.0 | | -Pspark-3.3 | Yes | 1.6.0 | -## Building with Apache dlcdn site +## Building With Apache dlcdn Site By default, we use `https://archive.apache.org/dist/` to download the built-in release packages of engines, such as Spark or Flink. diff --git a/docs/develop_tools/debugging.md b/docs/contributing/code/debugging.md similarity index 98% rename from docs/develop_tools/debugging.md rename to docs/contributing/code/debugging.md index faf7173e427..d3fb6d16f38 100644 --- a/docs/develop_tools/debugging.md +++ b/docs/contributing/code/debugging.md @@ -35,7 +35,7 @@ In the IDE, you set the corresponding parameters(host&port) in debug configurati
      -![](../imgs/idea_debug.png) +![](../../imgs/idea_debug.png)
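If it helps to see the server side of this setup, here is a hedged sketch of exposing a JDWP port before attaching the IDE's remote-debug configuration; the `KYUUBI_JAVA_OPTS` variable name and port `5005` are assumptions for illustration and may differ from your local setup.

```bash
# Illustrative only: open a JDWP debug port, then attach the IDE's
# "Remote JVM Debug" run configuration to localhost:5005.
export KYUUBI_JAVA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
bin/kyuubi start
```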
      diff --git a/docs/develop_tools/developer.md b/docs/contributing/code/developer.md similarity index 76% rename from docs/develop_tools/developer.md rename to docs/contributing/code/developer.md index 329e219de46..ef6fb79889e 100644 --- a/docs/develop_tools/developer.md +++ b/docs/contributing/code/developer.md @@ -24,16 +24,6 @@ build/mvn versions:set -DgenerateBackupPoms=false ``` -## Update Document Version - -Whenever project version updates, please also update the document version at `docs/conf.py` to target the upcoming release. - -For example, - -```python -release = '1.2.0' -``` - ## Update Dependency List Kyuubi uses the `dev/dependencyList` file to indicate what upstream dependencies will actually go to the server-side classpath. @@ -58,3 +48,12 @@ Kyuubi uses settings.md to explain available configurations. You can run `KYUUBI_UPDATE=1 build/mvn clean test -pl kyuubi-server -am -Pflink-provided,spark-provided,hive-provided -DwildcardSuites=org.apache.kyuubi.config.AllKyuubiConfiguration` to append descriptions of new configurations to settings.md. + +## Generative Tooling Usage + +In general, the ASF allows contributions co-authored using generative AI tools. However, there are several considerations when you submit a patch containing generated content. + +Foremost, you are required to disclose usage of such tool. Furthermore, you are responsible for ensuring that the terms and conditions of the tool in question are +compatible with usage in an Open Source project and inclusion of the generated content doesn't pose a risk of copyright violation. + +Please refer to [The ASF Generative Tooling Guidance](https://www.apache.org/legal/generative-tooling.html) for more detailed information. diff --git a/docs/develop_tools/distribution.md b/docs/contributing/code/distribution.md similarity index 98% rename from docs/develop_tools/distribution.md rename to docs/contributing/code/distribution.md index 217f0a4178d..23c9c6542de 100644 --- a/docs/develop_tools/distribution.md +++ b/docs/contributing/code/distribution.md @@ -15,7 +15,7 @@ - limitations under the License. --> -# Building a Runnable Distribution +# Building A Runnable Distribution To create a Kyuubi distribution like those distributed by [Kyuubi Release Page](https://kyuubi.apache.org/releases.html), and that is laid out to be runnable, use `./build/dist` in the project root directory. diff --git a/docs/contributing/code/get_started.rst b/docs/contributing/code/get_started.rst new file mode 100644 index 00000000000..33981a8cd6d --- /dev/null +++ b/docs/contributing/code/get_started.rst @@ -0,0 +1,70 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Get Started +=========== + +Good First Issues +----------------- + +.. 
image:: https://img.shields.io/github/issues/apache/kyuubi/good%20first%20issue?color=green&label=Good%20first%20issue&logo=gfi&logoColor=red&style=for-the-badge + :alt: GitHub issues by-label + :target: `Good First Issues`_ + +**Good First Issue** is initiative to curate easy pickings for first-time +contributors. It helps you locate suitable development tasks with beginner's +skills required, and finally make your first contribution to Kyuubi. + +After solving one or more good first issues, you should be able to + +- Find efficient ways to communicate with the community and get help +- Setup `develop environment`_ on your machine +- `Build`_ Kyuubi from source +- `Run tests`_ locally +- `Submit a pull request`_ through Github +- Be listed in `Apache Kyuubi contributors`_ +- And most importantly, you can move to the next level and try some tricky issues + +.. note:: Don't linger too long at this stage. + :class: dropdown, toggle + +Help Wanted Issues +------------------ + +.. image:: https://img.shields.io/github/issues/apache/kyuubi/help%20wanted?color=brightgreen&label=HELP%20WANTED&style=for-the-badge + :alt: GitHub issues by-label + :target: `Help Wanted Issues`_ + +Issues that maintainers labeled as help wanted are mostly + +- sub-tasks of an ongoing shorthanded umbrella +- non-urgent improvements +- bug fixes for corner cases +- feature requests not covered by current technology stack of kyuubi community + +Since these problems are not urgent, you can take your time when fixing them. + +.. note:: Help wanted issues may contain easy pickings and tricky ones. + :class: dropdown, toggle + + +.. _Good First Issues: https://github.com/apache/kyuubi/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22 +.. _develop environment: idea_setup.html +.. _Build: build.html +.. _Run tests: testing.html +.. _Submit a pull request: https://kyuubi.apache.org/pull_request.html +.. _Apache Kyuubi contributors: https://github.com/apache/kyuubi/graphs/contributors +.. _Help Wanted Issues: https://github.com/apache/kyuubi/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22 + diff --git a/docs/develop_tools/idea_setup.md b/docs/contributing/code/idea_setup.md similarity index 100% rename from docs/develop_tools/idea_setup.md rename to docs/contributing/code/idea_setup.md diff --git a/docs/develop_tools/index.rst b/docs/contributing/code/index.rst similarity index 84% rename from docs/develop_tools/index.rst rename to docs/contributing/code/index.rst index c56321cb379..25a6e421baa 100644 --- a/docs/develop_tools/index.rst +++ b/docs/contributing/code/index.rst @@ -13,15 +13,19 @@ See the License for the specific language governing permissions and limitations under the License. -Develop Tools -============= +Contributing Code +================= + +These sections explain the process, guidelines, and tools for contributing +code to the Kyuubi project. .. toctree:: :maxdepth: 2 + get_started + style building distribution - build_document testing debugging developer diff --git a/docs/contributing/code/style.rst b/docs/contributing/code/style.rst new file mode 100644 index 00000000000..d967e895971 --- /dev/null +++ b/docs/contributing/code/style.rst @@ -0,0 +1,39 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. 
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Code Style Guide +================ + +Code is written once by its author, but read and modified multiple times by +lots of other engineers. As most bugs actually come from future modification +of the code, we need to optimize our codebase for long-term, global +readability and maintainability. The best way to achieve this is to write +simple code. + +Kyuubi's source code is multilingual, specific code style will be applied to +corresponding language. + +Scala Coding Style Guide +------------------------ + +Kyuubi adopts the `Databricks Scala Coding Style Guide`_ for scala codes. + +Java Coding Style Guide +----------------------- + +Kyuubi adopts the `Google Java style`_ for java codes. + +.. _Databricks Scala Coding Style Guide: https://github.com/databricks/scala-style-guide +.. _Google Java style: https://google.github.io/styleguide/javaguide.html \ No newline at end of file diff --git a/docs/develop_tools/testing.md b/docs/contributing/code/testing.md similarity index 100% rename from docs/develop_tools/testing.md rename to docs/contributing/code/testing.md diff --git a/docs/contributing/doc/build.rst b/docs/contributing/doc/build.rst new file mode 100644 index 00000000000..4ec2362f350 --- /dev/null +++ b/docs/contributing/doc/build.rst @@ -0,0 +1,96 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Building Documentation +====================== + +Follow the steps below and learn how to build the Kyuubi documentation as the +one you are watching now. + +Setup Environment +----------------- + +- Firstly, install ``virtualenv``, this is optional but recommended as it is useful + to create an independent environment to resolve dependency issues for building + the documentation. + +.. code-block:: sh + :caption: Install virtualenv + + $ pip install virtualenv + +- Switch to the ``docs`` root directory. + +.. code-block:: sh + :caption: Switch to docs + + $ cd $KYUUBI_SOURCE_PATH/docs + +- Create a virtual environment named 'kyuubi' or anything you like using ``virtualenv`` + if it's not existing. + +.. code-block:: sh + :caption: New virtual environment + + $ virtualenv kyuubi + +- Activate the virtual environment, + +.. 
code-block:: sh + :caption: Activate virtual environment + + $ source ./kyuubi/bin/activate + +Install All Dependencies +------------------------ + +Install all dependencies enumerated in the ``requirements.txt``. + +.. code-block:: sh + :caption: Install dependencies + + $ pip install -r requirements.txt + + +Create Documentation +-------------------- + +Make sure you are in the ``$KYUUBI_SOURCE_PATH/docs`` directory. + +Linux & MacOS +~~~~~~~~~~~~~ + +.. code-block:: sh + :caption: Sphinx build on Unix-like OS + + $ make html + +Windows +~~~~~~~ + +.. code-block:: sh + :caption: Sphinx build on Windows + + $ make.bat html + + +If the build process succeed, the HTML pages are in +``$KYUUBI_SOURCE_PATH/docs/_build/html``. + +View Locally +------------ + +Open the `$KYUUBI_SOURCE_PATH/docs/_build/html/index.html` file in your +favorite web browser. diff --git a/docs/contributing/doc/get_started.rst b/docs/contributing/doc/get_started.rst new file mode 100644 index 00000000000..f262695b777 --- /dev/null +++ b/docs/contributing/doc/get_started.rst @@ -0,0 +1,117 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Get Started +=========== + +.. image:: https://img.shields.io/github/issues/apache/kyuubi/kind:documentation?color=green&logo=gfi&logoColor=red&style=for-the-badge + :alt: GitHub issues by-label + + +Trivial Fixes +------------- + +For typos, layout, grammar, spelling, punctuation errors and other similar issues +or changes that occur within a single file, it is acceptable to make edits directly +on the page being viewed. When viewing a source file on kyuubi's +`Github repository`_, a simple click on the ``edit icon`` or keyboard shortcut +``e`` will activate the editor. Similarly, when viewing files on `Read The Docs`_ +platform, clicking on the ``suggest edit`` button will lead you to the editor. +These methods do not require any local development environment setup and +are convenient for making quick fixes. + +Upon completion of the editing process, opt the ``commit changes`` option, +adhere to the provided instructions to submit a pull request, +and await feedback from the designated reviewer. + +Major Fixes +----------- + +For significant modifications that affect multiple files, it is advisable to +clone the repository to a local development environment, implement the necessary +changes, and conduct thorough testing prior to submitting a pull request. + + +`Fork`_ The Repository +~~~~~~~~~~~~~~~~~~~~~~ + +Clone The Forked Repository +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: + :caption: Clone the repository + + $ git clone https://github.com/your_username/kyuubi.git + +Replace "your_username" with your GitHub username. This will create a local +copy of your forked repository on your machine. 
You will see the ``master`` +branch if you run ``git branch`` in the ``kyuubi`` folder. + +Create A New Branch +~~~~~~~~~~~~~~~~~~~ + +.. code-block:: + :caption: Create a new branch + + $ git checkout -b guide + Switched to a new branch 'guide' + +Editing And Testing +~~~~~~~~~~~~~~~~~~~ + +Make the necessary changes to the documentation files using a text editor. +`Build and verify`_ the changes you have made to see if they look fine. + +.. note:: + :class: dropdown, toggle + +Create A Pull Request +~~~~~~~~~~~~~~~~~~~~~ + +Once you have made the changes, + +- Commit them with a descriptive commit message using the command: + +.. code-block:: + :caption: commit the changes + + $ git commit -m "Description of changes made" + +- Push the changes to your forked repository using the command + +.. code-block:: + :caption: push the changes + + $ git push origin guide + +- `Create A Pull Request`_ with a descriptive PR title and description. + +- Polishing the PR with comments of reviews addressed + +Report Only +----------- + +If you don't have time to fix the doc issue and submit a pull request on your own, +`reporting a document issue`_ also helps. Please follow some basic rules: + +- Use the title field to clearly describe the issue +- Choose the documentation report template +- Fill out the required field in the documentation report + +.. _Home Page: https://kyuubi.apache.org +.. _Fork: https://github.com/apache/kyuubi/fork +.. _Build and verify: build.html +.. _Create A Pull Request: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request +.. _reporting a document issue: https://github.com/apache/kyuubi/issues/new/choose \ No newline at end of file diff --git a/docs/contributing/doc/index.rst b/docs/contributing/doc/index.rst new file mode 100644 index 00000000000..bf6ae41bde2 --- /dev/null +++ b/docs/contributing/doc/index.rst @@ -0,0 +1,44 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Contributing Documentations +=========================== + +The project documentation is crucial for users and contributors. This guide +outlines the contribution guidelines for Apache Kyuubi documentation. + +Kyuubi's documentation source files are maintained in the same `github repository`_ +as the code base, which ensures updating code and documentation synchronously. +All documentation source files can be found in the sub-folder named ``docs``. + +Kyuubi's documentation is published and hosted on `Read The Docs`_ platform by +version. with each version having its own dedicated page. To access a specific +version of the document, simply navigate to the "Docs" tab on our Home Page. 
+ +We welcome any contributions to the documentation, including but not limited to +writing, translation, report doc issues on Github, reposting. + + +.. toctree:: + :maxdepth: 2 + + get_started + style + build + +.. _Github repository: https://github.com/apache/kyuubi +.. _Restructured Text: https://en.wikipedia.org/wiki/ReStructuredText +.. _Read The Docs: https://kyuubi.rtfd.io +.. _Home Page: https://kyuubi.apache.org \ No newline at end of file diff --git a/docs/contributing/doc/style.rst b/docs/contributing/doc/style.rst new file mode 100644 index 00000000000..14cc2b8ac78 --- /dev/null +++ b/docs/contributing/doc/style.rst @@ -0,0 +1,135 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Documentation Style Guide +========================= + +This guide contains guidelines, not rules. While guidelines are important +to follow, they are not hard and fast rules. It's important to use your +own judgement and discretion when creating content, and to depart from the +guidelines when necessary to improve the quality and effectiveness of your +content. Ultimately, the goal is to create content that is clear, concise, +and useful to your audience, and sometimes deviating from the guidelines +may be necessary to achieve that goal. + +Goals +----- + +- Source text files are readable and portable +- Source diagram files are editable +- Source files are maintainable over time and across community + +License Header +-------------- + +All original documents should include the ASF license header. All reproduced +or quoted content should be authorized and attributed to the source. + +If you are about to quote some from commercial materials, please refer to +`ASF 3RD PARTY LICENSE POLICY`_, or consult the Apache Kyuubi PMC to avoid +legality issues. + +General Style +------------- + +- Use `ReStructuredText`_ or `Markdown`_ format for text, avoid HTML hacks +- Use `draw.io`_ for drawing or editing an image, and export it as PNG for + referencing in document. 
A pull request should commit both of them +- Use Kyuubi for short instead of Apache Kyuubi after the first time in the + same page +- Character line limit: 78, except unbreakable ones +- Prefer lists to tables +- Prefer unordered list than ordered + +ReStructuredText +---------------- + +Headings +~~~~~~~~ + +- Use **Pascal Case**, every word starts with an uppercase letter, + e.g., 'Documentation Style Guide' +- Use a max of **three levels** + - Split into multiple files when there comes an H4 + - Prefer `directive rubric`_ than H4 +- Use underline-only adornment styles, **DO NOT** use overline + - The length of underline characters **SHOULD** match the title + - H1 should be underlined with '=' + - H2 should be underlined with '-' + - H3 should be underlined with '~' + - H4 should be underlined with '^', but it's better to avoid using H4 +- **DO NOT** use numbering for sections +- **DO NOT** use "Kyuubi" in titles if possible + +Links +~~~~~ + +- Define links with short descriptive phrases, group them at the bottom of the file + +.. note:: + :class: dropdown, toggle + + .. code-block:: + :caption: Recommended + + Please refer to `Apache Kyuubi Home Page`_. + + .. _Apache Kyuubi Home Page: https://kyuubi.apache.org/ + + .. code-block:: + :caption: Not recommended + + Please refer to `Apache Kyuubi Home Page `_. + + +Markdown +-------- + +Headings +~~~~~~~~ + +- Use **Pascal Case**, every word starts with an uppercase letter, + e.g., 'Documentation Style Guide' +- Use a max of **three levels** + - Split into multiple files when there comes an H4 +- **DO NOT** use numbering for sections +- **DO NOT** use "Kyuubi" in titles if possible + +Images +------ + +Use images only when they provide helpful visual explanations of information +otherwise difficult to express with words + +Third-party references +---------------------- + +If the preceding references don't provide explicit guidance, then see these +third-party references, depending on the nature of your question: + +- `Google developer documentation style`_ +- `Apple Style Guide`_ +- `Red Hat supplementary style guide for product documentation`_ + +.. References + +.. _ASF 3RD PARTY LICENSE POLICY: https://www.apache.org/legal/resolved.html#asf-3rd-party-license-policy +.. _directive rubric :https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-rubric +.. _ReStructuredText: https://docutils.sourceforge.io/rst.html +.. _Markdown: https://en.wikipedia.org/wiki/Markdown +.. _draw.io: https://www.diagrams.net/ +.. _Google developer documentation style: https://developers.google.com/style +.. _Apple Style Guide: https://help.apple.com/applestyleguide/ +.. _Red Hat supplementary style guide for product documentation: https://redhat-documentation.github.io/supplementary-style-guide/ diff --git a/docs/deployment/engine_on_kubernetes.md b/docs/deployment/engine_on_kubernetes.md index 44fca1602e3..a8f7c6ca0e7 100644 --- a/docs/deployment/engine_on_kubernetes.md +++ b/docs/deployment/engine_on_kubernetes.md @@ -36,6 +36,17 @@ Spark on Kubernetes config master by using a special format. You can use cmd `kubectl cluster-info` to get api-server host and port. +### Deploy Mode + +One of the main advantages of the Kyuubi server compared to other interactive Spark clients is that it supports cluster deploy mode. +It is highly recommended to run Spark in k8s in cluster mode. 
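Purely as an illustration, a hedged sketch of such a cluster-mode setup in `spark-defaults.conf` might look like the block below; the upload path and service account name are placeholders, and the minimum required keys are listed right after this sketch.

```properties
# Hedged sketch: run Spark engines in cluster deploy mode on Kubernetes.
spark.submit.deployMode=cluster
# Placeholder staging location for uploaded files (S3 or HDFS).
spark.kubernetes.file.upload.path=s3a://mybucket/spark-upload
# Placeholder ServiceAccount with sufficient RBAC permissions for the driver pod.
spark.kubernetes.authenticate.driver.serviceAccountName=spark
```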
+ +The minimum required configurations are: + +* spark.submit.deployMode (cluster) +* spark.kubernetes.file.upload.path (path on s3 or hdfs) +* spark.kubernetes.authenticate.driver.serviceAccountName ([viz ServiceAccount](#serviceaccount)) + ### Docker Image Spark ships a `./bin/docker-image-tool.sh` script to build and publish the Docker images for running Spark applications on Kubernetes. diff --git a/docs/deployment/engine_on_yarn.md b/docs/deployment/engine_on_yarn.md index 6812afa46db..1025418d9c4 100644 --- a/docs/deployment/engine_on_yarn.md +++ b/docs/deployment/engine_on_yarn.md @@ -15,13 +15,13 @@ - limitations under the License. --> -# Deploy Kyuubi engines on Yarn +# Deploy Kyuubi engines on YARN -## Deploy Kyuubi Spark Engine on Yarn +## Deploy Kyuubi Spark Engine on YARN ### Requirements -When you want to deploy Kyuubi's Spark SQL engines on YARN, you'd better have cognition upon the following things. +To deploy Kyuubi's Spark SQL engines on YARN, you'd better have cognition upon the following things. - Knowing the basics about [Running Spark on YARN](https://spark.apache.org/docs/latest/running-on-yarn.html) - A binary distribution of Spark which is built with YARN support @@ -113,11 +113,11 @@ so `spark.kerberos.keytab` and `spark.kerberos.principal` should not use now. Instead, you can schedule a periodically `kinit` process via `crontab` task on the local machine that hosts Kyuubi server or simply use [Kyuubi Kinit](settings.html#kinit). -## Deploy Kyuubi Flink Engine on Yarn +## Deploy Kyuubi Flink Engine on YARN ### Requirements -When you want to deploy Kyuubi's Flink SQL engines on YARN, you'd better have cognition upon the following things. +To deploy Kyuubi's Flink SQL engines on YARN, you'd better have cognition upon the following things. - Knowing the basics about [Running Flink on YARN](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn) - A binary distribution of Flink which is built with YARN support @@ -127,13 +127,59 @@ When you want to deploy Kyuubi's Flink SQL engines on YARN, you'd better have co - An active Object Storage cluster, e.g. [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html), S3 and [Minio](https://min.io/) etc. - Setup Hadoop client configurations at the machine the Kyuubi server locates -### Yarn Session Mode +### Flink Deployment Modes + +Currently, Flink supports two deployment modes on YARN: [YARN Application Mode](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/deployment/resource-providers/yarn/#application-mode) and [YARN Session Mode](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/deployment/resource-providers/yarn/#application-mode). + +- YARN Application Mode: In this mode, Kyuubi starts a dedicated Flink application cluster and runs the SQL engine on it. +- YARN Session Mode: In this mode, Kyuubi starts the Flink SQL engine locally and connects to a running Flink YARN session cluster. + +As Kyuubi has to know the deployment mode before starting the SQL engine, it's required to specify the deployment mode in Kyuubi configuration. + +```properties +# candidates: yarn-application, yarn-session +flink.execution.target=yarn-application +``` + +### YARN Application Mode + +#### Flink Configurations + +Since the Flink SQL engine runs inside the JobManager, it's recommended to tune the resource configurations of the JobManager based on your workload. 
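As a hedged illustration only, and assuming the `flink.`-prefixed pass-through shown in the properties snippet above also applies to these keys, a JobManager sizing sketch could look like the following; the values are placeholders, and the underlying Flink keys are described in the table below.

```properties
# Hedged sketch: give the JobManager (YARN application master) more headroom.
flink.yarn.appmaster.vcores=2
flink.jobmanager.memory.process.size=2g
```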
+ +The related Flink configurations are listed below (see more details at [Flink Configuration](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/config/#yarn)): + +| Name | Default | Meaning | +|--------------------------------|---------|----------------------------------------------------------------------------------------| +| yarn.appmaster.vcores | 1 | The number of virtual cores (vcores) used by the JobManager (YARN application master). | +| jobmanager.memory.process.size | (none) | Total size of the memory of the JobManager process. | + +Note that Flink application mode doesn't support HA for multiple jobs as for now, this also applies to Kyuubi's Flink SQL engine. If JobManager fails and restarts, the submitted jobs would not be recovered and should be re-submitted. + +#### Environment + +Either `HADOOP_CONF_DIR` or `YARN_CONF_DIR` is configured and points to the Hadoop client configurations directory, usually, `$HADOOP_HOME/etc/hadoop`. + +You could verify your setup by the following command: + +```bash +# we assume to be in the root directory of +# the unzipped Flink distribution + +# (0) export HADOOP_CLASSPATH +export HADOOP_CLASSPATH=`hadoop classpath` + +# (1) submit a Flink job and ensure it runs successfully +./bin/flink run -m yarn-cluster ./examples/streaming/WordCount.jar +``` + +### YARN Session Mode #### Flink Configurations ```bash execution.target: yarn-session -# Yarn Session Cluster application id. +# YARN Session Cluster application id. yarn.application.id: application_00000000XX_00XX ``` @@ -194,23 +240,19 @@ To use Hadoop vanilla jars, please configure $KYUUBI_HOME/conf/kyuubi-env.sh as $ echo "export FLINK_HADOOP_CLASSPATH=`hadoop classpath`" >> $KYUUBI_HOME/conf/kyuubi-env.sh ``` -### Deployment Modes Supported by Flink on YARN - -For experiment use, we recommend deploying Kyuubi Flink SQL engine in [Session Mode](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#session-mode). -At present, [Application Mode](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#application-mode) and [Per-Job Mode (deprecated)](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#per-job-mode-deprecated) are not supported for Flink engine. - ### Kerberos -As Kyuubi Flink SQL engine wraps the Flink SQL client that currently does not support [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab), -so `security.kerberos.login.keytab` and `security.kerberos.login.principal` should not use now. +With regard to YARN application mode, Kerberos is supported natively by Flink, see [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab) for details. -Instead, you can schedule a periodically `kinit` process via `crontab` task on the local machine that hosts Kyuubi server or simply use [Kyuubi Kinit](settings.html#kinit). 
+With regard to YARN session mode, `security.kerberos.login.keytab` and `security.kerberos.login.principal` are not effective, as Kyuubi Flink SQL engine mainly relies on Flink SQL client which currently does not support [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab), + +As a workaround, you can schedule a periodically `kinit` process via `crontab` task on the local machine that hosts Kyuubi server or simply use [Kyuubi Kinit](settings.html#kinit). -## Deploy Kyuubi Hive Engine on Yarn +## Deploy Kyuubi Hive Engine on YARN ### Requirements -When you want to deploy Kyuubi's Hive SQL engines on YARN, you'd better have cognition upon the following things. +To deploy Kyuubi's Hive SQL engines on YARN, you'd better have cognition upon the following things. - Knowing the basics about [Running Hive on YARN](https://cwiki.apache.org/confluence/display/Hive/GettingStarted) - A binary distribution of Hive @@ -239,7 +281,7 @@ $ $HIVE_HOME/bin/beeline -u 'jdbc:hive2://localhost:10000/default' 0: jdbc:hive2://localhost:10000/default> INSERT INTO TABLE pokes VALUES (1, 'hello'); ``` -If the `Hive SQL` passes and there is a job in Yarn Web UI, It indicates the hive environment is normal. +If the `Hive SQL` passes and there is a job in YARN Web UI, it indicates the hive environment is good. #### Required Environment Variable diff --git a/docs/deployment/high_availability_guide.md b/docs/deployment/high_availability_guide.md index 353e549ebba..51c87815765 100644 --- a/docs/deployment/high_availability_guide.md +++ b/docs/deployment/high_availability_guide.md @@ -39,7 +39,7 @@ Using multiple Kyuubi service units with load balancing instead of a single unit - High concurrency - By adding or removing Kyuubi server instances can easily scale up or down to meet the need of client requests. - Upgrade smoothly - - Kyuubi server supports stop gracefully. We could delete a `k.i.` but not stop it immediately. + - Kyuubi server supports stopping gracefully. We could delete a `k.i.` but not stop it immediately. In this case, the `k.i.` will not take any new connection request but only operation requests from existing connections. After all connection are released, it stops then. - The dependencies of Kyuubi engines are free to change, such as bump up versions, modify configurations, add external jars, relocate to another engine home. Everything will be reloaded during start and stop. diff --git a/docs/deployment/index.rst b/docs/deployment/index.rst index ec3ece95145..1b6bf876678 100644 --- a/docs/deployment/index.rst +++ b/docs/deployment/index.rst @@ -31,15 +31,6 @@ Basics high_availability_guide migration-guide -Configurations --------------- - -.. toctree:: - :maxdepth: 2 - :glob: - - settings - Engines ------- diff --git a/docs/deployment/kyuubi_on_kubernetes.md b/docs/deployment/kyuubi_on_kubernetes.md index 8bb1d88c3fe..11ffe8e4859 100644 --- a/docs/deployment/kyuubi_on_kubernetes.md +++ b/docs/deployment/kyuubi_on_kubernetes.md @@ -90,7 +90,7 @@ See more related details in [Using RBAC Authorization](https://kubernetes.io/doc ## Config -You can configure Kyuubi the old-fashioned way by placing kyuubi-default.conf inside the image. Kyuubi do not recommend using this way on Kubernetes. +You can configure Kyuubi the old-fashioned way by placing `kyuubi-defaults.conf` inside the image. Kyuubi does not recommend using this way on Kubernetes. 
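As a hedged alternative sketch (the namespace and ConfigMap name here are placeholders, not project defaults), the same settings can be externalized by building a ConfigMap straight from a local `kyuubi-defaults.conf` and mounting it into the pod:

```bash
# Illustrative only: create a ConfigMap from a local kyuubi-defaults.conf
# instead of baking the file into the image.
kubectl create configmap kyuubi-defaults \
  --from-file=kyuubi-defaults.conf \
  --namespace=kyuubi
```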
Kyuubi provide `${KYUUBI_HOME}/docker/kyuubi-configmap.yaml` to build Configmap for Kyuubi. diff --git a/docs/deployment/migration-guide.md b/docs/deployment/migration-guide.md index 42905340e10..27dad2aba92 100644 --- a/docs/deployment/migration-guide.md +++ b/docs/deployment/migration-guide.md @@ -17,6 +17,22 @@ # Kyuubi Migration Guide +## Upgrading from Kyuubi 1.7 to 1.8 + +* Since Kyuubi 1.8, SQLite is added and becomes the default database type of Kyuubi metastore, as Derby has been deprecated. + Both Derby and SQLite are mainly for testing purposes, and they're not supposed to be used in production. + To restore previous behavior, set `kyuubi.metadata.store.jdbc.database.type=DERBY` and + `kyuubi.metadata.store.jdbc.url=jdbc:derby:memory:kyuubi_state_store_db;create=true`. + +## Upgrading from Kyuubi 1.7.1 to 1.7.2 + +* Since Kyuubi 1.7.2, for Kyuubi BeeLine, please use `--python-mode` option to run python code or script. + +## Upgrading from Kyuubi 1.7.0 to 1.7.1 + +* Since Kyuubi 1.7.1, `protocolVersion` is removed from the request parameters of the REST API `Open(create) a session`. All removed or unknown parameters will be silently ignored and affects nothing. +* Since Kyuubi 1.7.1, `confOverlay` is supported in the request parameters of the REST API `Create an operation with EXECUTE_STATEMENT type`. + ## Upgrading from Kyuubi 1.6 to 1.7 * In Kyuubi 1.7, `kyuubi.ha.zookeeper.engine.auth.type` does not fallback to `kyuubi.ha.zookeeper.auth.type`. @@ -24,7 +40,7 @@ * Since Kyuubi 1.7, Kyuubi returns engine's information for `GetInfo` request instead of server. To restore the previous behavior, set `kyuubi.server.info.provider` to `SERVER`. * Since Kyuubi 1.7, Kyuubi session type `SQL` is refactored to `INTERACTIVE`, because Kyuubi supports not only `SQL` session, but also `SCALA` and `PYTHON` sessions. User need to use `INTERACTIVE` sessionType to look up the session event. -* Since Kyuubi 1.7, the REST API of `Open(create) a session` will not contains parameters `user` `password` and `IpAddr`. User and password should be set in `Authorization` of http request if needed. +* Since Kyuubi 1.7, the REST API of `Open(create) a session` will not contain parameters `user` `password` and `IpAddr`. User and password should be set in `Authorization` of http request if needed. ## Upgrading from Kyuubi 1.6.0 to 1.6.1 diff --git a/docs/deployment/spark/aqe.md b/docs/deployment/spark/aqe.md index 90cc5aff84c..3682c7f9ec5 100644 --- a/docs/deployment/spark/aqe.md +++ b/docs/deployment/spark/aqe.md @@ -210,7 +210,7 @@ Kyuubi is a long-running service to make it easier for end-users to use Spark SQ ### Setting Default Configurations -[Configuring by `spark-defaults.conf`](settings.html#via-spark-defaults-conf) at the engine side is the best way to set up Kyuubi with AQE. All engines will be instantiated with AQE enabled. +[Configuring by `spark-defaults.conf`](../settings.html#via-spark-defaults-conf) at the engine side is the best way to set up Kyuubi with AQE. All engines will be instantiated with AQE enabled. Here is a config setting that we use in our platform when deploying Kyuubi. 
diff --git a/docs/deployment/spark/dynamic_allocation.md b/docs/deployment/spark/dynamic_allocation.md index b177b63c365..1a5057e731f 100644 --- a/docs/deployment/spark/dynamic_allocation.md +++ b/docs/deployment/spark/dynamic_allocation.md @@ -170,7 +170,7 @@ Kyuubi is a long-running service to make it easier for end-users to use Spark SQ ### Setting Default Configurations -[Configuring by `spark-defaults.conf`](settings.html#via-spark-defaults-conf) at the engine side is the best way to set up Kyuubi with DRA. All engines will be instantiated with DRA enabled. +[Configuring by `spark-defaults.conf`](../settings.html#via-spark-defaults-conf) at the engine side is the best way to set up Kyuubi with DRA. All engines will be instantiated with DRA enabled. Here is a config setting that we use in our platform when deploying Kyuubi. diff --git a/docs/develop_tools/build_document.md b/docs/develop_tools/build_document.md deleted file mode 100644 index 0be5a180705..00000000000 --- a/docs/develop_tools/build_document.md +++ /dev/null @@ -1,76 +0,0 @@ - - -# Building Kyuubi Documentation - -Follow the steps below and learn how to build the Kyuubi documentation as the one you are watching now. - -## Install & Activate `virtualenv` - -Firstly, install `virtualenv`, this is optional but recommended as it is useful to create an independent environment to resolve dependency issues for building the documentation. - -```bash -pip install virtualenv -``` - -Switch to the `docs` root directory. - -```bash -cd $KYUUBI_SOURCE_PATH/docs -``` - -Create a virtual environment named 'kyuubi' or anything you like using `virtualenv` if it's not existing. - -```bash -virtualenv kyuubi -``` - -Activate it, - -```bash -source ./kyuubi/bin/activate -``` - -## Install all dependencies - -Install all dependencies enumerated in the `requirements.txt`. - -```bash -pip install -r requirements.txt -``` - -## Create Documentation - -Make sure you are in the `$KYUUBI_SOURCE_PATH/docs` directory. - -linux & macos - -```bash -make html -``` - -windows - -```bash -make.bat html -``` - -If the build process succeed, the HTML pages are in `$KYUUBI_SOURCE_PATH/docs/_build/html`. - -## View Locally - -Open the `$KYUUBI_SOURCE_PATH/docs/_build/html/index.html` file in your favorite web browser. 
diff --git a/docs/extensions/engines/flink/functions.md b/docs/extensions/engines/flink/functions.md new file mode 100644 index 00000000000..1d047d07889 --- /dev/null +++ b/docs/extensions/engines/flink/functions.md @@ -0,0 +1,30 @@ + + +# Auxiliary SQL Functions + +Kyuubi provides several auxiliary SQL functions as supplement to +Flink's [Built-in Functions](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/dev/table/functions/systemfunctions/) + +| Name | Description | Return Type | Since | +|---------------------|-------------------------------------------------------------|-------------|-------| +| kyuubi_version | Return the version of Kyuubi Server | string | 1.8.0 | +| kyuubi_engine_name | Return the application name for the associated query engine | string | 1.8.0 | +| kyuubi_engine_id | Return the application id for the associated query engine | string | 1.8.0 | +| kyuubi_system_user | Return the system user name for the associated query engine | string | 1.8.0 | +| kyuubi_session_user | Return the session username for the associated query engine | string | 1.8.0 | + diff --git a/docs/extensions/engines/flink/index.rst b/docs/extensions/engines/flink/index.rst index 01bbecf9263..58105b0fa76 100644 --- a/docs/extensions/engines/flink/index.rst +++ b/docs/extensions/engines/flink/index.rst @@ -20,6 +20,7 @@ Extensions for Flink :maxdepth: 1 ../../../connector/flink/index + functions .. warning:: This page is still in-progress. diff --git a/docs/extensions/engines/hive/functions.md b/docs/extensions/engines/hive/functions.md new file mode 100644 index 00000000000..24094ecce31 --- /dev/null +++ b/docs/extensions/engines/hive/functions.md @@ -0,0 +1,30 @@ + + + +# Auxiliary SQL Functions + +Kyuubi provides several auxiliary SQL functions as supplement to Hive's [Built-in Functions](https://cwiki.apache.org/confluence/display/hive/languagemanual+udf#LanguageManualUDF-Built-inFunctions) + +| Name | Description | Return Type | Since | +|----------------|-------------------------------------|-------------|-------| +| kyuubi_version | Return the version of Kyuubi Server | string | 1.8.0 | +| engine_name | Return the name of engine | string | 1.8.0 | +| engine_id | Return the id of engine | string | 1.8.0 | +| system_user | Return the system user | string | 1.8.0 | +| session_user | Return the session user | string | 1.8.0 | + diff --git a/docs/extensions/engines/hive/index.rst b/docs/extensions/engines/hive/index.rst index 8aeebf1bc8b..f43ec11e0b1 100644 --- a/docs/extensions/engines/hive/index.rst +++ b/docs/extensions/engines/hive/index.rst @@ -20,6 +20,7 @@ Extensions for Hive :maxdepth: 2 ../../../connector/hive/index + functions .. warning:: This page is still in-progress. 
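A quick usage sketch for the auxiliary functions tabled above; the function names come from the tables themselves, while the exact invocation shown here is an assumption and may vary slightly between engines.

```sql
-- Hedged sketch: call the auxiliary functions like any other built-in function.
SELECT kyuubi_version(), session_user();
```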
diff --git a/docs/extensions/engines/spark/functions.md b/docs/extensions/engines/spark/functions.md index 66f22aea860..78c2692436f 100644 --- a/docs/extensions/engines/spark/functions.md +++ b/docs/extensions/engines/spark/functions.md @@ -27,4 +27,5 @@ Kyuubi provides several auxiliary SQL functions as supplement to Spark's [Built- | engine_id | Return the spark application id for the associated query engine | string | 1.4.0 | | system_user | Return the system user name for the associated query engine | string | 1.3.0 | | session_user | Return the session username for the associated query engine | string | 1.4.0 | +| engine_url | Return the engine url for the associated query engine | string | 1.8.0 | diff --git a/docs/extensions/engines/spark/lineage.md b/docs/extensions/engines/spark/lineage.md index 8f2f76c9f65..2dbb2a026d3 100644 --- a/docs/extensions/engines/spark/lineage.md +++ b/docs/extensions/engines/spark/lineage.md @@ -45,14 +45,14 @@ The lineage of this SQL: ```json { - "inputTables": ["default.test_table0"], + "inputTables": ["spark_catalog.default.test_table0"], "outputTables": [], "columnLineage": [{ "column": "col0", - "originalColumns": ["default.test_table0.a"] + "originalColumns": ["spark_catalog.default.test_table0.a"] }, { "column": "col1", - "originalColumns": ["default.test_table0.b"] + "originalColumns": ["spark_catalog.default.test_table0.b"] }] } ``` @@ -101,13 +101,12 @@ Kyuubi Spark Lineage Listener Extension is built using [Apache Maven](https://ma To build it, `cd` to the root direct of kyuubi project and run: ```shell -build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -DskipTests +build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -am -DskipTests ``` After a while, if everything goes well, you will get the plugin finally in two parts: - The main plugin jar, which is under `./extensions/spark/kyuubi-spark-lineage/target/kyuubi-spark-lineage_${scala.binary.version}-${project.version}.jar` -- The least transitive dependencies needed, which are under `./extensions/spark/kyuubi-spark-lineage/target/scala-${scala.binary.version}/jars` ### Build against Different Apache Spark Versions @@ -118,7 +117,7 @@ Sometimes, it may be incompatible with other Spark distributions, then you may n For example, ```shell -build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -DskipTests -Dspark.version=3.1.2 +build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -am -DskipTests -Dspark.version=3.1.2 ``` The available `spark.version`s are shown in the following table. @@ -126,6 +125,7 @@ The available `spark.version`s are shown in the following table. | Spark Version | Supported | Remark | |:-------------:|:---------:|:------:| | master | √ | - | +| 3.4.x | √ | - | | 3.3.x | √ | - | | 3.2.x | √ | - | | 3.1.x | √ | - | @@ -168,13 +168,65 @@ Add `org.apache.kyuubi.plugin.lineage.SparkOperationLineageQueryExecutionListene spark.sql.queryExecutionListeners=org.apache.kyuubi.plugin.lineage.SparkOperationLineageQueryExecutionListener ``` -### Settings for Lineage Logger and Path +### Optional configuration -#### Lineage Logger Path +#### Whether to Skip Permanent View Resolution -The location of all the engine operation lineage events go for the builtin JSON logger. -We first need set `kyuubi.engine.event.loggers` to `JSON`. -All operation lineage events will be written in the unified event json logger path, which be setting with -`kyuubi.engine.event.json.log.path`. We can get the lineage logger from the `operation_lineage` dir in the -`kyuubi.engine.event.json.log.path`. 
+If enabled, lineage resolution will stop at permanent views and treat them as physical tables. We need +to add one configuration. + +```properties +spark.kyuubi.plugin.lineage.skip.parsing.permanent.view.enabled=true +``` + +### Get Lineage Events + +The lineage dispatchers are used to dispatch lineage events, configured via `spark.kyuubi.plugin.lineage.dispatchers`. + +
        +
+- `SPARK_EVENT` (by default): send lineage event to spark event bus
+- `KYUUBI_EVENT`: send lineage event to kyuubi event bus
+- `ATLAS`: send lineage to apache atlas
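For illustration, and assuming the property accepts a comma-separated list (its plural name suggests so, but treat this as an assumption), combining dispatchers might look like:

```properties
# Hedged sketch: emit lineage both to the Spark event bus and to the Kyuubi event bus.
spark.kyuubi.plugin.lineage.dispatchers=SPARK_EVENT,KYUUBI_EVENT
```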
      + +#### Get Lineage Events from SparkListener + +When using the `SPARK_EVENT` dispatcher, the lineage events will be sent to the `SparkListenerBus`. To handle lineage events, a new `SparkListener` needs to be added. +Example for Adding `SparkListener`: + +```scala +spark.sparkContext.addSparkListener(new SparkListener { + override def onOtherEvent(event: SparkListenerEvent): Unit = { + event match { + case lineageEvent: OperationLineageEvent => + // Your processing logic + case _ => + } + } + }) +``` + +#### Get Lineage Events from Kyuubi EventHandler + +When using the `KYUUBI_EVENT` dispatcher, the lineage events will be sent to the Kyuubi `EventBus`. Refer to [Kyuubi Event Handler](../../server/events) to handle kyuubi events. + +#### Ingest Lineage Entities to Apache Atlas + +The lineage entities can be ingested into [Apache Atlas](https://atlas.apache.org/) using the `ATLAS` dispatcher. + +Extra works: + ++ The least transitive dependencies needed, which are under `./extensions/spark/kyuubi-spark-lineage/target/scala-${scala.binary.version}/jars` ++ Use `spark.files` to specify the `atlas-application.properties` configuration file for Atlas + +Atlas Client configurations (Configure in `atlas-application.properties` or passed in `spark.atlas.` prefix): + +| Name | Default Value | Description | Since | +|-----------------------------------------|------------------------|-------------------------------------------------------|-------| +| atlas.rest.address | http://localhost:21000 | The rest endpoint url for the Atlas server | 1.8.0 | +| atlas.client.type | rest | The client type (currently only supports rest) | 1.8.0 | +| atlas.client.username | none | The client username | 1.8.0 | +| atlas.client.password | none | The client password | 1.8.0 | +| atlas.cluster.name | primary | The cluster name to use in qualifiedName of entities. | 1.8.0 | +| atlas.hook.spark.column.lineage.enabled | true | Whether to ingest column lineages to Atlas. | 1.8.0 | diff --git a/docs/extensions/engines/spark/rules.md b/docs/extensions/engines/spark/rules.md index 5c8c0486920..4614f52440a 100644 --- a/docs/extensions/engines/spark/rules.md +++ b/docs/extensions/engines/spark/rules.md @@ -63,24 +63,33 @@ Now, you can enjoy the Kyuubi SQL Extension. Kyuubi provides some configs to make these feature easy to use. -| Name | Default Value | Description | Since | -|---------------------------------------------------------------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------| -| spark.sql.optimizer.insertRepartitionBeforeWrite.enabled | true | Add repartition node at the top of query plan. An approach of merging small files. | 1.2.0 | -| spark.sql.optimizer.insertRepartitionNum | none | The partition number if `spark.sql.optimizer.insertRepartitionBeforeWrite.enabled` is enabled. If AQE is disabled, the default value is `spark.sql.shuffle.partitions`. If AQE is enabled, the default value is none that means depend on AQE. | 1.2.0 | -| spark.sql.optimizer.dynamicPartitionInsertionRepartitionNum | 100 | The partition number of each dynamic partition if `spark.sql.optimizer.insertRepartitionBeforeWrite.enabled` is enabled. 
We will repartition by dynamic partition columns to reduce the small file but that can cause data skew. This config is to extend the partition of dynamic partition column to avoid skew but may generate some small files. | 1.2.0 | -| spark.sql.optimizer.forceShuffleBeforeJoin.enabled | false | Ensure shuffle node exists before shuffled join (shj and smj) to make AQE `OptimizeSkewedJoin` works (complex scenario join, multi table join). | 1.2.0 | -| spark.sql.optimizer.finalStageConfigIsolation.enabled | false | If true, the final stage support use different config with previous stage. The prefix of final stage config key should be `spark.sql.finalStage.`. For example, the raw spark config: `spark.sql.adaptive.advisoryPartitionSizeInBytes`, then the final stage config should be: `spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes`. | 1.2.0 | -| spark.sql.analyzer.classification.enabled | false | When true, allows Kyuubi engine to judge this SQL's classification and set `spark.sql.analyzer.classification` back into sessionConf. Through this configuration item, Spark can optimizing configuration dynamic. | 1.4.0 | -| spark.sql.optimizer.insertZorderBeforeWriting.enabled | true | When true, we will follow target table properties to insert zorder or not. The key properties are: 1) `kyuubi.zorder.enabled`: if this property is true, we will insert zorder before writing data. 2) `kyuubi.zorder.cols`: string split by comma, we will zorder by these cols. | 1.4.0 | -| spark.sql.optimizer.zorderGlobalSort.enabled | true | When true, we do a global sort using zorder. Note that, it can cause data skew issue if the zorder columns have less cardinality. When false, we only do local sort using zorder. | 1.4.0 | -| spark.sql.watchdog.maxPartitions | none | Set the max partition number when spark scans a data source. Enable MaxPartitionStrategy by specifying this configuration. Add maxPartitions Strategy to avoid scan excessive partitions on partitioned table, it's optional that works with defined | 1.4.0 | -| spark.sql.optimizer.dropIgnoreNonExistent | false | When true, do not report an error if DROP DATABASE/TABLE/VIEW/FUNCTION/PARTITION specifies a non-existent database/table/view/function/partition | 1.5.0 | -| spark.sql.optimizer.rebalanceBeforeZorder.enabled | false | When true, we do a rebalance before zorder in case data skew. Note that, if the insertion is dynamic partition we will use the partition columns to rebalance. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | -| spark.sql.optimizer.rebalanceZorderColumns.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do rebalance before Z-Order. If it's dynamic partition insert, the rebalance expression will include both partition columns and Z-Order columns. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | -| spark.sql.optimizer.twoPhaseRebalanceBeforeZorder.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do two phase rebalance before Z-Order for the dynamic partition write. The first phase rebalance using dynamic partition column; The second phase rebalance using dynamic partition column Z-Order columns. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | -| spark.sql.optimizer.zorderUsingOriginalOrdering.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do sort by the original ordering i.e. lexicographical order. 
Note that, this config only affects with Spark 3.3.x. | 1.6.0 | -| spark.sql.optimizer.inferRebalanceAndSortOrders.enabled | false | When ture, infer columns for rebalance and sort orders from original query, e.g. the join keys from join. It can avoid compression ratio regression. | 1.7.0 | -| spark.sql.optimizer.inferRebalanceAndSortOrdersMaxColumns | 3 | The max columns of inferred columns. | 1.7.0 | -| spark.sql.optimizer.insertRepartitionBeforeWriteIfNoShuffle.enabled | false | When true, add repartition even if the original plan does not have shuffle. | 1.7.0 | -| spark.sql.optimizer.finalStageConfigIsolationWriteOnly.enabled | true | When true, only enable final stage isolation for writing. | 1.7.0 | +| Name | Default Value | Description | Since | +|---------------------------------------------------------------------|----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------| +| spark.sql.optimizer.insertRepartitionBeforeWrite.enabled | true | Add repartition node at the top of query plan. An approach of merging small files. | 1.2.0 | +| spark.sql.optimizer.insertRepartitionNum | none | The partition number if `spark.sql.optimizer.insertRepartitionBeforeWrite.enabled` is enabled. If AQE is disabled, the default value is `spark.sql.shuffle.partitions`. If AQE is enabled, the default value is none that means depend on AQE. This config is used for Spark 3.1 only. | 1.2.0 | +| spark.sql.optimizer.dynamicPartitionInsertionRepartitionNum | 100 | The partition number of each dynamic partition if `spark.sql.optimizer.insertRepartitionBeforeWrite.enabled` is enabled. We will repartition by dynamic partition columns to reduce the small file but that can cause data skew. This config is to extend the partition of dynamic partition column to avoid skew but may generate some small files. | 1.2.0 | +| spark.sql.optimizer.forceShuffleBeforeJoin.enabled | false | Ensure shuffle node exists before shuffled join (shj and smj) to make AQE `OptimizeSkewedJoin` works (complex scenario join, multi table join). | 1.2.0 | +| spark.sql.optimizer.finalStageConfigIsolation.enabled | false | If true, the final stage support use different config with previous stage. The prefix of final stage config key should be `spark.sql.finalStage.`. For example, the raw spark config: `spark.sql.adaptive.advisoryPartitionSizeInBytes`, then the final stage config should be: `spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes`. | 1.2.0 | +| spark.sql.analyzer.classification.enabled | false | When true, allows Kyuubi engine to judge this SQL's classification and set `spark.sql.analyzer.classification` back into sessionConf. Through this configuration item, Spark can optimizing configuration dynamic. | 1.4.0 | +| spark.sql.optimizer.insertZorderBeforeWriting.enabled | true | When true, we will follow target table properties to insert zorder or not. The key properties are: 1) `kyuubi.zorder.enabled`: if this property is true, we will insert zorder before writing data. 2) `kyuubi.zorder.cols`: string split by comma, we will zorder by these cols. | 1.4.0 | +| spark.sql.optimizer.zorderGlobalSort.enabled | true | When true, we do a global sort using zorder. 
Note that, it can cause data skew issue if the zorder columns have less cardinality. When false, we only do local sort using zorder. | 1.4.0 | +| spark.sql.watchdog.maxPartitions | none | Set the max partition number when spark scans a data source. Enable maxPartition Strategy by specifying this configuration. Add maxPartitions Strategy to avoid scan excessive partitions on partitioned table, it's optional that works with defined | 1.4.0 | +| spark.sql.watchdog.maxFileSize | none | Set the maximum size in bytes of files when spark scans a data source. Enable maxFileSize Strategy by specifying this configuration. Add maxFileSize Strategy to avoid scan excessive size of files, it's optional that works with defined | 1.8.0 | +| spark.sql.optimizer.dropIgnoreNonExistent | false | When true, do not report an error if DROP DATABASE/TABLE/VIEW/FUNCTION/PARTITION specifies a non-existent database/table/view/function/partition | 1.5.0 | +| spark.sql.optimizer.rebalanceBeforeZorder.enabled | false | When true, we do a rebalance before zorder in case data skew. Note that, if the insertion is dynamic partition we will use the partition columns to rebalance. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | +| spark.sql.optimizer.rebalanceZorderColumns.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do rebalance before Z-Order. If it's dynamic partition insert, the rebalance expression will include both partition columns and Z-Order columns. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | +| spark.sql.optimizer.twoPhaseRebalanceBeforeZorder.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do two phase rebalance before Z-Order for the dynamic partition write. The first phase rebalance using dynamic partition column; The second phase rebalance using dynamic partition column Z-Order columns. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | +| spark.sql.optimizer.zorderUsingOriginalOrdering.enabled | false | When true and `spark.sql.optimizer.rebalanceBeforeZorder.enabled` is true, we do sort by the original ordering i.e. lexicographical order. Note that, this config only affects with Spark 3.3.x. | 1.6.0 | +| spark.sql.optimizer.inferRebalanceAndSortOrders.enabled | false | When ture, infer columns for rebalance and sort orders from original query, e.g. the join keys from join. It can avoid compression ratio regression. | 1.7.0 | +| spark.sql.optimizer.inferRebalanceAndSortOrdersMaxColumns | 3 | The max columns of inferred columns. | 1.7.0 | +| spark.sql.optimizer.insertRepartitionBeforeWriteIfNoShuffle.enabled | false | When true, add repartition even if the original plan does not have shuffle. | 1.7.0 | +| spark.sql.optimizer.finalStageConfigIsolationWriteOnly.enabled | true | When true, only enable final stage isolation for writing. | 1.7.0 | +| spark.sql.finalWriteStage.eagerlyKillExecutors.enabled | false | When true, eagerly kill redundant executors before running final write stage. | 1.8.0 | +| spark.sql.finalWriteStage.skipKillingExecutorsForTableCache | true | When true, skip killing executors if the plan has table caches. | 1.8.0 | +| spark.sql.finalWriteStage.retainExecutorsFactor | 1.2 | If the target executors * factor < active executors, and target executors * factor > min executors, then inject kill executors or inject custom resource profile. 
| 1.8.0 | +| spark.sql.finalWriteStage.resourceIsolation.enabled | false | When true, make final write stage resource isolation using custom RDD resource profile. | 1.8.0 | +| spark.sql.finalWriteStageExecutorCores | fallback spark.executor.cores | Specify the executor core request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 | +| spark.sql.finalWriteStageExecutorMemory | fallback spark.executor.memory | Specify the executor on heap memory request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 | +| spark.sql.finalWriteStageExecutorMemoryOverhead | fallback spark.executor.memoryOverhead | Specify the executor memory overhead request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 | +| spark.sql.finalWriteStageExecutorOffHeapMemory | NONE | Specify the executor off heap memory request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 | diff --git a/docs/extensions/server/authentication.rst b/docs/extensions/server/authentication.rst index ab238040cda..7a83b07c285 100644 --- a/docs/extensions/server/authentication.rst +++ b/docs/extensions/server/authentication.rst @@ -49,12 +49,12 @@ To create custom Authenticator class derived from the above interface, we need t - Referencing the library -.. code-block:: xml +.. parsed-literal:: org.apache.kyuubi kyuubi-common_2.12 - 1.5.2-incubating + \ |release|\ provided diff --git a/docs/extensions/server/events.rst b/docs/extensions/server/events.rst index 832c1e5df55..aee7d4899d2 100644 --- a/docs/extensions/server/events.rst +++ b/docs/extensions/server/events.rst @@ -51,12 +51,12 @@ To create custom EventHandlerProvider class derived from the above interface, we - Referencing the library -.. code-block:: xml +.. parsed-literal:: org.apache.kyuubi - kyuubi-event_2.12 - 1.7.0-incubating + kyuubi-events_2.12 + \ |release|\ provided diff --git a/docs/index.rst b/docs/index.rst index fbd299e7b86..e86041ffc0d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -179,6 +179,7 @@ What's Next :glob: quick_start/index + configuration/settings deployment/index Security monitor/index @@ -216,7 +217,13 @@ What's Next :caption: Contributing :maxdepth: 2 - develop_tools/index + contributing/code/index + contributing/doc/index + +.. toctree:: + :caption: Community + :maxdepth: 2 + community/index .. toctree:: diff --git a/docs/overview/kyuubi_vs_hive.md b/docs/overview/kyuubi_vs_hive.md index 43ffac146f6..80038c17864 100644 --- a/docs/overview/kyuubi_vs_hive.md +++ b/docs/overview/kyuubi_vs_hive.md @@ -32,16 +32,14 @@ have multiple reducer stages. 
## Differences Between Kyuubi and HiveServer2 -- - -| Kyuubi | HiveServer2 | -|--------------------------------|---------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ** Language ** | Spark SQL | Hive QL | -| ** Optimizer ** | Spark SQL Catalyst | Hive Optimizer | -| ** Engine ** | up to Spark 3.x | MapReduce/[up to Spark 2.3](https://cwiki.apache.org/confluence/display/Hive/Hive+on+Spark%3A+Getting+Started#HiveonSpark:GettingStarted-VersionCompatibility)/Tez | -| ** Performance ** | High | Low | -| ** Compatibility with Spark ** | Good | Bad(need to rebuild on a specific version) | -| ** Data Types ** | [Spark Data Types](https://spark.apache.org/docs/latest/sql-ref-datatypes.html) | [Hive Data Types](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types) | +| | Kyuubi | HiveServer2 | +|------------------------------|---------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Language** | Spark SQL | Hive QL | +| **Optimizer** | Spark SQL Catalyst | Hive Optimizer | +| **Engine** | up to Spark 3.x | MapReduce/[up to Spark 2.3](https://cwiki.apache.org/confluence/display/Hive/Hive+on+Spark%3A+Getting+Started#HiveonSpark:GettingStarted-VersionCompatibility)/Tez | +| **Performance** | High | Low | +| **Compatibility with Spark** | Good | Bad(need to rebuild on a specific version) | +| **Data Types** | [Spark Data Types](https://spark.apache.org/docs/latest/sql-ref-datatypes.html) | [Hive Data Types](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types) | ## Performance diff --git a/docs/quick_start/quick_start.rst b/docs/quick_start/quick_start.rst index db564edb92c..2cf5f567fcb 100644 --- a/docs/quick_start/quick_start.rst +++ b/docs/quick_start/quick_start.rst @@ -43,8 +43,8 @@ pre-installed and the `JAVA_HOME` is correctly set to each component. **Kyuubi** Gateway \ |release| \ - Kyuubi Server Engine lib - Kyuubi Engine Beeline - Kyuubi Hive Beeline - **Spark** Engine >=3.0.0 A Spark distribution - **Flink** Engine >=1.14.0 A Flink distribution + **Spark** Engine >=3.1 A Spark distribution + **Flink** Engine 1.16/1.17 A Flink distribution **Trino** Engine >=363 A Trino cluster **Doris** Engine N/A A Doris cluster **Hive** Engine - 3.1.x - A Hive distribution diff --git a/docs/quick_start/quick_start_with_helm.md b/docs/quick_start/quick_start_with_helm.md index a2de5444560..0733a4de72b 100644 --- a/docs/quick_start/quick_start_with_helm.md +++ b/docs/quick_start/quick_start_with_helm.md @@ -15,7 +15,7 @@ - limitations under the License. --> -# Getting Started With Kyuubi on Kubernetes +# Getting Started with Helm ## Running Kyuubi with Helm diff --git a/docs/quick_start/quick_start_with_jdbc.md b/docs/quick_start/quick_start_with_jdbc.md index c22cc1b65c1..e6f4f705296 100644 --- a/docs/quick_start/quick_start_with_jdbc.md +++ b/docs/quick_start/quick_start_with_jdbc.md @@ -15,82 +15,82 @@ - limitations under the License. 
--> -# Getting Started With Hive JDBC +# Getting Started with Hive JDBC -## How to install JDBC driver +## How to get the Kyuubi JDBC driver -Kyuubi JDBC driver is fully compatible with the 2.3.* version of hive JDBC driver, so we reuse hive JDBC driver to connect to Kyuubi server. +Kyuubi Thrift API is fully compatible with HiveServer2, so technically, it allows using any Hive JDBC driver to connect to +Kyuubi Server. But it's recommended to use the [Kyuubi Hive JDBC driver](../client/jdbc/kyuubi_jdbc), which is forked from the +Hive 3.1.x JDBC driver and aims to support some functionalities missing from the original Hive JDBC driver. -Add repository to your maven configuration file which may reside in `$MAVEN_HOME/conf/settings.xml`. +The driver is available from Maven Central: ```xml - - - central maven repo - central maven repo https - https://repo.maven.apache.org/maven2 - - -``` - -You can add below dependency to your `pom.xml` file in your application. - -```xml - - - org.apache.hive - hive-jdbc - 2.3.7 - - org.apache.hadoop - hadoop-common - - 2.7.4 + org.apache.kyuubi + kyuubi-hive-jdbc-shaded + 1.7.0 ``` -## Use JDBC driver with kerberos +## Connect to non-kerberized Kyuubi Server The below Java code connects to a non-kerberized Kyuubi Server by JDBC. ```java package org.apache.kyuubi.examples; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; import java.sql.*; -import org.apache.hadoop.security.UserGroupInformation; - -public class JDBCTest { - - private static String driverName = "org.apache.hive.jdbc.HiveDriver"; - private static String kyuubiJdbcUrl = "jdbc:hive2://localhost:10009/default;"; - - public static void main(String[] args) throws ClassNotFoundException, SQLException { - String principal = args[0]; // kerberos principal - String keytab = args[1]; // keytab file location - Configuration configuration = new Configuration(); - configuration.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - UserGroupInformation.setConfiguration(configuration); - UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab); - - Class.forName(driverName); - Connection conn = ugi.doAs(new PrivilegedExceptionAction(){ - public Connection run() throws SQLException { - return DriverManager.getConnection(kyuubiJdbcUrl); - } - }); - Statement st = conn.createStatement(); - ResultSet res = st.executeQuery("show databases"); - while (res.next()) { - System.out.println(res.getString(1)); +public class KyuubiJDBC { + + private static String driverName = "org.apache.kyuubi.jdbc.KyuubiHiveDriver"; + private static String kyuubiJdbcUrl = "jdbc:kyuubi://localhost:10009/default;"; + + public static void main(String[] args) throws SQLException { + try (Connection conn = DriverManager.getConnection(kyuubiJdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + try (ResultSet rs = stmt.executeQuery("show databases")) { + while (rs.next()) { + System.out.println(rs.getString(1)); + } + } + } + } + } +} +``` + +## Connect to Kerberized Kyuubi Server + +The following Java code uses a keytab file to login and connect to Kyuubi Server by JDBC.
+ +```java +package org.apache.kyuubi.examples; + +import java.sql.*; + +public class KyuubiJDBCDemo { + + private static String driverName = "org.apache.kyuubi.jdbc.KyuubiHiveDriver"; + private static String kyuubiJdbcUrlTemplate = "jdbc:kyuubi://localhost:10009/default;" + + "kyuubiClientPrincipal=%s;kyuubiClientKeytab=%s;kyuubiServerPrincipal=%s"; + + public static void main(String[] args) throws SQLException { + String clientPrincipal = args[0]; // Kerberos principal + String clientKeytab = args[1]; // Keytab file location + String serverPrincipal = args[2]; // Kerberos principal used by Kyuubi Server + String kyuubiJdbcUrl = String.format(kyuubiJdbcUrlTemplate, clientPrincipal, clientKeytab, serverPrincipal); + try (Connection conn = DriverManager.getConnection(kyuubiJdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + try (ResultSet rs = stmt.executeQuery("show databases")) { + while (rs.next()) { + System.out.println(rs.getString(1)); + } } - res.close(); - st.close(); - conn.close(); + } } + } } } ``` diff --git a/docs/quick_start/quick_start_with_jupyter.md b/docs/quick_start/quick_start_with_jupyter.md index 44b3faa5786..608da92846e 100644 --- a/docs/quick_start/quick_start_with_jupyter.md +++ b/docs/quick_start/quick_start_with_jupyter.md @@ -15,5 +15,5 @@ - limitations under the License. --> -# Getting Started With Hive Jupyter Lap +# Getting Started with Jupyter Lab diff --git a/docs/requirements.txt b/docs/requirements.txt index ecc8116e77d..8e1f5c47119 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -24,3 +24,5 @@ sphinx-book-theme==0.3.3 sphinx-markdown-tables==0.0.17 sphinx-notfound-page==0.8.3 sphinx-togglebutton===0.3.2 +sphinxemoji===0.2.0 +sphinx-copybutton===0.5.2 diff --git a/docs/security/authentication.rst b/docs/security/authentication.rst index f16a452c8c2..00bf368ff11 100644 --- a/docs/security/authentication.rst +++ b/docs/security/authentication.rst @@ -43,4 +43,4 @@ The related configurations can be found at `Authentication Configurations`_ jdbc ../extensions/server/authentication -.. _Authentication Configurations: ../deployment/settings.html#authentication +.. _Authentication Configurations: ../configuration/settings.html#authentication diff --git a/docs/security/authorization/spark/build.md b/docs/security/authorization/spark/build.md index 3886f08dfa3..7e38f2eed19 100644 --- a/docs/security/authorization/spark/build.md +++ b/docs/security/authorization/spark/build.md @@ -68,17 +68,18 @@ build/mvn clean package -pl :kyuubi-spark-authz_2.12 -DskipTests -Dranger.versio The available `ranger.version`s are shown in the following table. -| Ranger Version | Supported | Remark | -|:--------------:|:---------:|:------:| -| 2.3.x | √ | - | -| 2.2.x | √ | - | -| 2.1.x | √ | - | -| 2.0.x | √ | - | -| 1.2.x | √ | - | -| 1.1.x | √ | - | -| 1.0.x | √ | - | -| 0.7.x | √ | - | -| 0.6.x | √ | - | +| Ranger Version | Supported | Remark | +|:--------------:|:---------:|:-----------------------------------------------------------------------------------------:| +| 2.4.x | √ | - | +| 2.3.x | √ | - | +| 2.2.x | √ | - | +| 2.1.x | √ | - | +| 2.0.x | √ | - | +| 1.2.x | √ | - | +| 1.1.x | √ | - | +| 1.0.x | √ | - | +| 0.7.x | √ | - | +| 0.6.x | X | [KYUUBI-4672](https://github.com/apache/kyuubi/issues/4672) reported unresolved failures. | Currently, all Ranger releases except 0.6.x are supported.
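For orientation, here is a minimal Scala sketch (e.g. pasted into `spark-shell`) of how the plugin built above is typically attached to a Spark session. It assumes the AuthZ extension class name `org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension` and that the built jar plus a valid Ranger client configuration are already on the application classpath:

```scala
import org.apache.spark.sql.SparkSession

// Attach the Kyuubi Spark AuthZ extension (built against a supported Ranger version above)
// to a Spark session; queries issued through this session are then checked against the
// Ranger policies fetched by the plugin.
val spark = SparkSession.builder()
  .appName("kyuubi-authz-example")
  .config("spark.sql.extensions", "org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension")
  .getOrCreate()

spark.sql("SHOW DATABASES").show()
```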
diff --git a/docs/security/authorization/spark/overview.rst b/docs/security/authorization/spark/overview.rst index fcbaa880b60..364d6485fe7 100644 --- a/docs/security/authorization/spark/overview.rst +++ b/docs/security/authorization/spark/overview.rst @@ -106,4 +106,4 @@ You can specify config `spark.kyuubi.conf.restricted.list` values to disable cha 2. A set statement with key equal to `spark.sql.optimizer.excludedRules` and value containing `org.apache.kyuubi.plugin.spark.authz.ranger.*` also does not allow modification. .. _Apache Ranger: https://ranger.apache.org/ -.. _Spark Configurations: ../../../deployment/settings.html#spark-configurations +.. _Spark Configurations: ../../../configuration/settings.html#spark-configurations diff --git a/docs/security/ldap.md b/docs/security/ldap.md new file mode 100644 index 00000000000..7994afb5142 --- /dev/null +++ b/docs/security/ldap.md @@ -0,0 +1,60 @@ + + +# Configure Kyuubi to use LDAP Authentication + +Kyuubi can be configured to enable frontend LDAP authentication for clients, such as BeeLine, or the JDBC and ODBC drivers. +At present, only the simple LDAP authentication mechanism involving a username and password is supported. The client sends +a username and password to the Kyuubi server, and the Kyuubi server validates these credentials using an external LDAP service. + +## Enable LDAP Authentication + +To enable LDAP authentication for Kyuubi, the LDAP-related configurations must be set in +`$KYUUBI_HOME/conf/kyuubi-defaults.conf` on each node where Kyuubi server is installed. + +For example, + +```properties example +kyuubi.authentication=LDAP +kyuubi.authentication.ldap.baseDN=dc=org +kyuubi.authentication.ldap.domain=apache.org +kyuubi.authentication.ldap.binddn=uid=kyuubi,OU=Users,DC=apache,DC=org +kyuubi.authentication.ldap.bindpw=kyuubi123123 +kyuubi.authentication.ldap.url=ldap://hostname.com:389/ +``` + +## User and Group Filter in LDAP + +Kyuubi also supports complex LDAP cases as [Apache Hive](https://cwiki.apache.org/confluence/display/Hive/User+and+Group+Filter+Support+with+LDAP+Atn+Provider+in+HiveServer2#UserandGroupFilterSupportwithLDAPAtnProviderinHiveServer2-UserandGroupFilterSupportwithLDAP) does. + +For example, + +```properties example +# Group Membership +kyuubi.authentication.ldap.groupClassKey=groupOfNames +kyuubi.authentication.ldap.groupDNPattern=CN=%s,OU=Groups,DC=apache,DC=org +kyuubi.authentication.ldap.groupFilter=group1,group2 +kyuubi.authentication.ldap.groupMembershipKey=memberUid +# User Search List +kyuubi.authentication.ldap.userDNPattern=CN=%s,CN=Users,DC=apache,DC=org +kyuubi.authentication.ldap.userFilter=hive-admin,hive,hive-test,hive-user +# Custom Query +kyuubi.authentication.ldap.customLDAPQuery=(&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)), (&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com)))) +``` + +Please refer to [Settings for LDAP authentication in Kyuubi](../configuration/settings.html?highlight=LDAP#authentication) +for all configurations. diff --git a/docs/security/ldap.rst b/docs/security/ldap.rst deleted file mode 100644 index 35cfcd6decf..00000000000 --- a/docs/security/ldap.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. See the NOTICE file distributed with - this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0 - (the "License"); you may not use this file except in compliance with - the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -Configure Kyuubi to use LDAP Authentication -=============================================== - -.. warning:: - the page is still in-progress. diff --git a/docs/tools/kyuubi-admin.rst b/docs/tools/kyuubi-admin.rst index 6063965938c..bd37f7e684f 100644 --- a/docs/tools/kyuubi-admin.rst +++ b/docs/tools/kyuubi-admin.rst @@ -73,6 +73,8 @@ Usage: ``bin/kyuubi-admin refresh config [options] []`` - The user defaults configs with key in format in the form of `___{username}___.{config key}` from default property file. * - unlimitedUsers - The users without maximum connections limitation. + * - denyUsers + - The user in the deny list will be denied to connect to kyuubi server. .. _list_engine: @@ -97,6 +99,17 @@ Usage: ``bin/kyuubi-admin list engine [options]`` - The subdomain for the share level of an engine. If not specified, it will read the configuration item kyuubi.engine.share.level.subdomain from kyuubi-defaults.conf. * - --hs2ProxyUser - The proxy user to impersonate. When specified, it will list engines for the hs2ProxyUser. + * - -a --all + - All the engine. + +.. _list_server: + +List Servers +------------------------------------- + +Prints a table of the key information about the servers. + +Usage: ``bin/kyuubi-admin list server`` .. _delete_engine: diff --git a/docs/tools/kyuubi-ctl.md b/docs/tools/kyuubi-ctl.md deleted file mode 100644 index aae67584e8e..00000000000 --- a/docs/tools/kyuubi-ctl.md +++ /dev/null @@ -1,183 +0,0 @@ - - -# Managing kyuubi servers and engines Tool - -## Usage - -```shell -bin/kyuubi-ctl --help -``` - -Output - -```shell -kyuubi 1.6.0-SNAPSHOT -Usage: kyuubi-ctl [create|get|delete|list] [options] - - -zk, --zk-quorum - The connection string for the zookeeper ensemble, using zk quorum manually. - -n, --namespace The namespace, using kyuubi-defaults/conf if absent. - -s, --host Hostname or IP address of a service. - -p, --port Listening port of a service. - -v, --version Using the compiled KYUUBI_VERSION default, change it if the active service is running in another. - -b, --verbose Print additional debug output. - -Command: create [server] - -Command: create server - Expose Kyuubi server instance to another domain. - -Command: get [server|engine] [options] - Get the service/engine node info, host and port needed. -Command: get server - Get Kyuubi server info of domain -Command: get engine - Get Kyuubi engine info belong to a user. - -u, --user The user name this engine belong to. - -et, --engine-type - The engine type this engine belong to. - -es, --engine-subdomain - The engine subdomain this engine belong to. - -esl, --engine-share-level - The engine share level this engine belong to. - -Command: delete [server|engine] [options] - Delete the specified service/engine node, host and port needed. -Command: delete server - Delete the specified service node for a domain -Command: delete engine - Delete the specified engine node for user. - -u, --user The user name this engine belong to. 
- -et, --engine-type - The engine type this engine belong to. - -es, --engine-subdomain - The engine subdomain this engine belong to. - -esl, --engine-share-level - The engine share level this engine belong to. - -Command: list [server|engine] [options] - List all the service/engine nodes for a particular domain. -Command: list server - List all the service nodes for a particular domain -Command: list engine - List all the engine nodes for a user - -u, --user The user name this engine belong to. - -et, --engine-type - The engine type this engine belong to. - -es, --engine-subdomain - The engine subdomain this engine belong to. - -esl, --engine-share-level - The engine share level this engine belong to. - - -h, --help Show help message and exit. -``` - -## Manage kyuubi servers - -You can specify the zookeeper address(`--zk-quorum`) and namespace(`--namespace`), version(`--version`) parameters to query a specific kyuubi server cluster. - -### List server - -List all the service nodes for a particular domain. - -```shell -bin/kyuubi-ctl list server -``` - -### Create server - -Expose Kyuubi server instance to another domain. - -First read `kyuubi.ha.zookeeper.namespace` in `conf/kyuubi-defaults.conf`, if there are server instances under this namespace, register them in the new namespace specified by the `--namespace` parameter. - -```shell -bin/kyuubi-ctl create server --namespace XXX -``` - -### Get server - -Get Kyuubi server info of domain. - -```shell -bin/kyuubi-ctl get server --host XXX --port YYY -``` - -### Delete server - -Delete the specified service node for a domain. - -After the server node is deleted, the kyuubi server stops opening new sessions and waits for all currently open sessions to be closed before the process exits. - -```shell -bin/kyuubi-ctl delete server --host XXX --port YYY -``` - -## Manage kyuubi engines - -You can also specify the engine type(`--engine-type`), engine share level subdomain(`--engine-subdomain`) and engine share level(`--engine-share-level`). - -If not specified, the configuration item `kyuubi.engine.type` of `kyuubi-defaults.conf` read, the default value is `SPARK_SQL`, `kyuubi.engine.share.level.subdomain`, the default value is `default`, `kyuubi.engine.share.level`, the default value is `USER`. - -If the engine pool mode is enabled through `kyuubi.engine.pool.size`, the subdomain consists of `kyuubi.engine.pool.name` and a number below size, e.g. `engine-pool-0` . - -`--engine-share-level` supports the following enum values. -* CONNECTION - -The engine Ref Id (UUID) must be specified via `--engine-subdomain`. -* USER: - -Default Value. -* GROUP: - -The `--user` parameter is the group name corresponding to the user. -* SERVER: - -The `--user` parameter is the user who started the kyuubi server. - -### List engine - -List all the engine nodes for a user. - -```shell -bin/kyuubi-ctl list engine --user AAA -``` - -The management share level is SERVER, the user who starts the kyuubi server is A, the engine is TRINO, and the subdomain is adhoc. - -```shell -bin/kyuubi-ctl list engine --user A --engine-type TRINO --engine-subdomain adhoc --engine-share-level SERVER -``` - -### Get engine - -Get Kyuubi engine info belong to a user. - -```shell -bin/kyuubi-ctl get engine --user AAA --host XXX --port YYY -``` - -### Delete engine - -Delete the specified engine node for user. - -After the engine node is deleted, the kyuubi engine stops opening new sessions and waits for all currently open sessions to be closed before the process exits. 
- -```shell -bin/kyuubi-ctl delete engine --user AAA --host XXX --port YYY -``` - diff --git a/docs/tools/kyuubi-ctl.rst b/docs/tools/kyuubi-ctl.rst new file mode 100644 index 00000000000..4a9308fed0e --- /dev/null +++ b/docs/tools/kyuubi-ctl.rst @@ -0,0 +1,213 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Administrator CLI +================= + +.. _usage: + +Usage +----- +.. code-block:: bash + + bin/kyuubi-ctl --help + +Output + +.. parsed-literal:: + + kyuubi |release| + Usage: kyuubi-ctl [create|get|delete|list] [options] + + -zk, --zk-quorum + The connection string for the zookeeper ensemble, using zk quorum manually. + -n, --namespace The namespace, using kyuubi-defaults/conf if absent. + -s, --host Hostname or IP address of a service. + -p, --port Listening port of a service. + -v, --version Using the compiled KYUUBI_VERSION default, change it if the active service is running in another. + -b, --verbose Print additional debug output. + + Command: create [server] + + Command: create server + Expose Kyuubi server instance to another domain. + + Command: get [server|engine] [options] + Get the service/engine node info, host and port needed. + Command: get server + Get Kyuubi server info of domain + Command: get engine + Get Kyuubi engine info belong to a user. + -u, --user The user name this engine belong to. + -et, --engine-type + The engine type this engine belong to. + -es, --engine-subdomain + The engine subdomain this engine belong to. + -esl, --engine-share-level + The engine share level this engine belong to. + + Command: delete [server|engine] [options] + Delete the specified service/engine node, host and port needed. + Command: delete server + Delete the specified service node for a domain + Command: delete engine + Delete the specified engine node for user. + -u, --user The user name this engine belong to. + -et, --engine-type + The engine type this engine belong to. + -es, --engine-subdomain + The engine subdomain this engine belong to. + -esl, --engine-share-level + The engine share level this engine belong to. + + Command: list [server|engine] [options] + List all the service/engine nodes for a particular domain. + Command: list server + List all the service nodes for a particular domain + Command: list engine + List all the engine nodes for a user + -u, --user The user name this engine belong to. + -et, --engine-type + The engine type this engine belong to. + -es, --engine-subdomain + The engine subdomain this engine belong to. + -esl, --engine-share-level + The engine share level this engine belong to. + + -h, --help Show help message and exit. + +.. 
_manage_kyuubi_servers: + +Manage kyuubi servers +--------------------- + +You can specify the zookeeper address(``--zk-quorum``) and namespace(``--namespace``), version(``--version``) parameters to query a specific kyuubi server cluster. + +.. _list_servers: + +List server +*********** + +List all the service nodes for a particular domain. + +.. code-block:: bash + + bin/kyuubi-ctl list server + +.. _create_servers: + +Create server +*********** +Expose Kyuubi server instance to another domain. + +First read ``kyuubi.ha.zookeeper.namespace`` in ``conf/kyuubi-defaults.conf``, if there are server instances under this namespace, register them in the new namespace specified by the ``--namespace`` parameter. + +.. code-block:: bash + + bin/kyuubi-ctl create server --namespace XXX + +.. _get_servers: + +Get server +*********** + +Get Kyuubi server info of domain. + +.. code-block:: bash + + bin/kyuubi-ctl get server --host XXX --port YYY + +.. _delete_servers: + +Delete server +*********** + +Delete the specified service node for a domain. + +After the server node is deleted, the kyuubi server stops opening new sessions and waits for all currently open sessions to be closed before the process exits. + +.. code-block:: bash + + bin/kyuubi-ctl delete server --host XXX --port YYY + +.. _manage_kyuubi_engines: + +Manage kyuubi engines +--------------------- + +You can also specify the engine type(``--engine-type``), engine share level subdomain(``--engine-subdomain``) and engine share level(``--engine-share-level``). + +If not specified, the configuration item ``kyuubi.engine.type`` of ``kyuubi-defaults.conf`` read, the default value is ``SPARK_SQL``, ``kyuubi.engine.share.level.subdomain``, the default value is ``default``, ``kyuubi.engine.share.level``, the default value is ``USER``. + +If the engine pool mode is enabled through ``kyuubi.engine.pool.size``, the subdomain consists of ``kyuubi.engine.pool.name`` and a number below size, e.g. ``engine-pool-0`` . + +``--engine-share-level`` supports the following enum values. + +- CONNECTION + +The engine Ref Id (UUID) must be specified via ``--engine-subdomain``. + +- USER: + +Default Value. + +- GROUP: + +The ``--user`` parameter is the group name corresponding to the user. + +- SERVER: + +The ``--user`` parameter is the user who started the kyuubi server. + +.. _list_engines: + +List engine +*********** + +List all the engine nodes for a user. + +.. code-block:: bash + + bin/kyuubi-ctl list engine --user AAA + +The management share level is SERVER, the user who starts the kyuubi server is A, the engine is TRINO, and the subdomain is adhoc. + +.. code-block:: bash + + bin/kyuubi-ctl list engine --user A --engine-type TRINO --engine-subdomain adhoc --engine-share-level SERVER + +.. _get_engines: + +Get engine +*********** + +Get Kyuubi engine info belong to a user. + +.. code-block:: bash + + bin/kyuubi-ctl get engine --user AAA --host XXX --port YYY + +.. _delete_engines: + +Delete engine +************* + +Delete the specified engine node for user. + +After the engine node is deleted, the kyuubi engine stops opening new sessions and waits for all currently open sessions to be closed before the process exits. + +.. 
code-block:: bash + + bin/kyuubi-ctl delete engine --user AAA --host XXX --port YYY \ No newline at end of file diff --git a/extensions/server/kyuubi-server-plugin/pom.xml b/extensions/server/kyuubi-server-plugin/pom.xml index 799f27c4632..12c1699fc02 100644 --- a/extensions/server/kyuubi-server-plugin/pom.xml +++ b/extensions/server/kyuubi-server-plugin/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml diff --git a/extensions/spark/kyuubi-extension-spark-3-1/pom.xml b/extensions/spark/kyuubi-extension-spark-3-1/pom.xml index 9f218f9d0fe..a7fcbabe5b4 100644 --- a/extensions/spark/kyuubi-extension-spark-3-1/pom.xml +++ b/extensions/spark/kyuubi-extension-spark-3-1/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-extension-spark-3-1_2.12 + kyuubi-extension-spark-3-1_${scala.binary.version} jar Kyuubi Dev Spark Extensions (for Spark 3.1) https://kyuubi.apache.org/ @@ -125,10 +125,21 @@ jakarta.xml.bind-api test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + - org.apache.maven.plugins @@ -137,7 +148,7 @@ false - org.apache.kyuubi:kyuubi-extension-spark-common_${scala.binary.version} + org.apache.kyuubi:* diff --git a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala index cd312de953b..f952b56f387 100644 --- a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala +++ b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.sql import org.apache.spark.sql.SparkSessionExtensions import org.apache.kyuubi.sql.sqlclassification.KyuubiSqlClassification -import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxPartitionStrategy} +import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxScanStrategy} // scalastyle:off line.size.limit /** @@ -40,6 +40,6 @@ class KyuubiSparkSQLExtension extends (SparkSessionExtensions => Unit) { // watchdog extension extensions.injectOptimizerRule(ForcedMaxOutputRowsRule) - extensions.injectPlannerStrategy(MaxPartitionStrategy) + extensions.injectPlannerStrategy(MaxScanStrategy) } } diff --git a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala index 2f12a82e23e..87c10bc3467 100644 --- a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala +++ b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala @@ -21,19 +21,21 @@ import org.antlr.v4.runtime._ import org.antlr.v4.runtime.atn.PredictionMode import org.antlr.v4.runtime.misc.{Interval, ParseCancellationException} import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier} import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.parser.{ParseErrorListener, ParseException, ParserInterface, PostProcessor} import 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.trees.Origin import org.apache.spark.sql.types.{DataType, StructType} -abstract class KyuubiSparkSQLParserBase extends ParserInterface { +abstract class KyuubiSparkSQLParserBase extends ParserInterface with SQLConfHelper { def delegate: ParserInterface - def astBuilder: KyuubiSparkSQLAstBuilderBase + def astBuilder: KyuubiSparkSQLAstBuilder override def parsePlan(sqlText: String): LogicalPlan = parse(sqlText) { parser => astBuilder.visit(parser.singleStatement()) match { + case optimize: UnparsedPredicateOptimize => + astBuilder.buildOptimizeStatement(optimize, delegate.parseExpression) case plan: LogicalPlan => plan case _ => delegate.parsePlan(sqlText) } @@ -105,7 +107,7 @@ abstract class KyuubiSparkSQLParserBase extends ParserInterface { class SparkKyuubiSparkSQLParser( override val delegate: ParserInterface) extends KyuubiSparkSQLParserBase { - def astBuilder: KyuubiSparkSQLAstBuilderBase = new KyuubiSparkSQLAstBuilder + def astBuilder: KyuubiSparkSQLAstBuilder = new KyuubiSparkSQLAstBuilder } /* Copied from Apache Spark's to avoid dependency on Spark Internals */ diff --git a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/sqlclassification/KyuubiGetSqlClassification.scala b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/sqlclassification/KyuubiGetSqlClassification.scala index e8aadc85029..b94cdf34674 100644 --- a/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/sqlclassification/KyuubiGetSqlClassification.scala +++ b/extensions/spark/kyuubi-extension-spark-3-1/src/main/scala/org/apache/kyuubi/sql/sqlclassification/KyuubiGetSqlClassification.scala @@ -55,7 +55,7 @@ object KyuubiGetSqlClassification extends Logging { * You need to make sure that the configuration item: SQL_CLASSIFICATION_ENABLED * is true * @param simpleName: the analyzied_logical_plan's getSimpleName - * @return: This sql's classification + * @return This sql's classification */ def getSqlClassification(simpleName: String): String = { jsonNode.map { json => diff --git a/extensions/spark/kyuubi-extension-spark-3-1/src/test/scala/org/apache/spark/sql/ZorderSuite.scala b/extensions/spark/kyuubi-extension-spark-3-1/src/test/scala/org/apache/spark/sql/ZorderSuite.scala index fd04e27dbb5..29a166abf3f 100644 --- a/extensions/spark/kyuubi-extension-spark-3-1/src/test/scala/org/apache/spark/sql/ZorderSuite.scala +++ b/extensions/spark/kyuubi-extension-spark-3-1/src/test/scala/org/apache/spark/sql/ZorderSuite.scala @@ -17,6 +17,20 @@ package org.apache.spark.sql -class ZorderWithCodegenEnabledSuite extends ZorderWithCodegenEnabledSuiteBase {} +import org.apache.spark.sql.catalyst.parser.ParserInterface -class ZorderWithCodegenDisabledSuite extends ZorderWithCodegenDisabledSuiteBase {} +import org.apache.kyuubi.sql.SparkKyuubiSparkSQLParser + +trait ParserSuite { self: ZorderSuiteBase => + override def createParser: ParserInterface = { + new SparkKyuubiSparkSQLParser(spark.sessionState.sqlParser) + } +} + +class ZorderWithCodegenEnabledSuite + extends ZorderWithCodegenEnabledSuiteBase + with ParserSuite {} + +class ZorderWithCodegenDisabledSuite + extends ZorderWithCodegenDisabledSuiteBase + with ParserSuite {} diff --git a/extensions/spark/kyuubi-extension-spark-3-2/pom.xml b/extensions/spark/kyuubi-extension-spark-3-2/pom.xml index a80040aca65..b1ddcecf84e 100644 --- a/extensions/spark/kyuubi-extension-spark-3-2/pom.xml +++ 
b/extensions/spark/kyuubi-extension-spark-3-2/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-extension-spark-3-2_2.12 + kyuubi-extension-spark-3-2_${scala.binary.version} jar Kyuubi Dev Spark Extensions (for Spark 3.2) https://kyuubi.apache.org/ @@ -125,10 +125,21 @@ jakarta.xml.bind-api test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + - org.apache.maven.plugins @@ -137,7 +148,7 @@ false - org.apache.kyuubi:kyuubi-extension-spark-common_${scala.binary.version} + org.apache.kyuubi:* diff --git a/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala b/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala index ef9da41be13..97e77704293 100644 --- a/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala +++ b/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.sql import org.apache.spark.sql.SparkSessionExtensions -import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxPartitionStrategy} +import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxScanStrategy} // scalastyle:off line.size.limit /** @@ -38,6 +38,6 @@ class KyuubiSparkSQLExtension extends (SparkSessionExtensions => Unit) { // watchdog extension extensions.injectOptimizerRule(ForcedMaxOutputRowsRule) - extensions.injectPlannerStrategy(MaxPartitionStrategy) + extensions.injectPlannerStrategy(MaxScanStrategy) } } diff --git a/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala b/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala index 2f12a82e23e..87c10bc3467 100644 --- a/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala +++ b/extensions/spark/kyuubi-extension-spark-3-2/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala @@ -21,19 +21,21 @@ import org.antlr.v4.runtime._ import org.antlr.v4.runtime.atn.PredictionMode import org.antlr.v4.runtime.misc.{Interval, ParseCancellationException} import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier} import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.parser.{ParseErrorListener, ParseException, ParserInterface, PostProcessor} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.trees.Origin import org.apache.spark.sql.types.{DataType, StructType} -abstract class KyuubiSparkSQLParserBase extends ParserInterface { +abstract class KyuubiSparkSQLParserBase extends ParserInterface with SQLConfHelper { def delegate: ParserInterface - def astBuilder: KyuubiSparkSQLAstBuilderBase + def astBuilder: KyuubiSparkSQLAstBuilder override def parsePlan(sqlText: String): LogicalPlan = parse(sqlText) { parser => astBuilder.visit(parser.singleStatement()) match { + case optimize: UnparsedPredicateOptimize => + astBuilder.buildOptimizeStatement(optimize, delegate.parseExpression) case plan: LogicalPlan => plan case _ => delegate.parsePlan(sqlText) } @@ -105,7 
+107,7 @@ abstract class KyuubiSparkSQLParserBase extends ParserInterface { class SparkKyuubiSparkSQLParser( override val delegate: ParserInterface) extends KyuubiSparkSQLParserBase { - def astBuilder: KyuubiSparkSQLAstBuilderBase = new KyuubiSparkSQLAstBuilder + def astBuilder: KyuubiSparkSQLAstBuilder = new KyuubiSparkSQLAstBuilder } /* Copied from Apache Spark's to avoid dependency on Spark Internals */ diff --git a/extensions/spark/kyuubi-extension-spark-3-2/src/test/scala/org/apache/spark/sql/ZorderSuite.scala b/extensions/spark/kyuubi-extension-spark-3-2/src/test/scala/org/apache/spark/sql/ZorderSuite.scala index fd04e27dbb5..29a166abf3f 100644 --- a/extensions/spark/kyuubi-extension-spark-3-2/src/test/scala/org/apache/spark/sql/ZorderSuite.scala +++ b/extensions/spark/kyuubi-extension-spark-3-2/src/test/scala/org/apache/spark/sql/ZorderSuite.scala @@ -17,6 +17,20 @@ package org.apache.spark.sql -class ZorderWithCodegenEnabledSuite extends ZorderWithCodegenEnabledSuiteBase {} +import org.apache.spark.sql.catalyst.parser.ParserInterface -class ZorderWithCodegenDisabledSuite extends ZorderWithCodegenDisabledSuiteBase {} +import org.apache.kyuubi.sql.SparkKyuubiSparkSQLParser + +trait ParserSuite { self: ZorderSuiteBase => + override def createParser: ParserInterface = { + new SparkKyuubiSparkSQLParser(spark.sessionState.sqlParser) + } +} + +class ZorderWithCodegenEnabledSuite + extends ZorderWithCodegenEnabledSuiteBase + with ParserSuite {} + +class ZorderWithCodegenDisabledSuite + extends ZorderWithCodegenDisabledSuiteBase + with ParserSuite {} diff --git a/extensions/spark/kyuubi-extension-spark-3-3/pom.xml b/extensions/spark/kyuubi-extension-spark-3-3/pom.xml index ca729a7819b..9b1a30af060 100644 --- a/extensions/spark/kyuubi-extension-spark-3-3/pom.xml +++ b/extensions/spark/kyuubi-extension-spark-3-3/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-extension-spark-3-3_2.12 + kyuubi-extension-spark-3-3_${scala.binary.version} jar Kyuubi Dev Spark Extensions (for Spark 3.3) https://kyuubi.apache.org/ @@ -37,6 +37,14 @@ ${project.version} + + org.apache.kyuubi + kyuubi-download + ${project.version} + pom + test + + org.apache.kyuubi kyuubi-extension-spark-common_${scala.binary.version} @@ -45,6 +53,14 @@ test + + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + test-jar + test + + org.scala-lang scala-library @@ -130,6 +146,38 @@ + + org.codehaus.mojo + build-helper-maven-plugin + + + regex-property + + regex-property + + + spark.home + ${project.basedir}/../../../externals/kyuubi-download/target/${spark.archive.name} + (.+)\.tgz + $1 + + + + + + org.scalatest + scalatest-maven-plugin + + + + ${spark.home} + ${scala.binary.version} + + + org.apache.maven.plugins maven-shade-plugin @@ -137,7 +185,7 @@ false - org.apache.kyuubi:kyuubi-extension-spark-common_${scala.binary.version} + org.apache.kyuubi:* diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala index ef9da41be13..792315d897a 100644 --- a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala @@ -17,9 +17,9 @@ package org.apache.kyuubi.sql -import 
org.apache.spark.sql.SparkSessionExtensions +import org.apache.spark.sql.{FinalStageResourceManager, InjectCustomResourceProfile, SparkSessionExtensions} -import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxPartitionStrategy} +import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxScanStrategy} // scalastyle:off line.size.limit /** @@ -38,6 +38,9 @@ class KyuubiSparkSQLExtension extends (SparkSessionExtensions => Unit) { // watchdog extension extensions.injectOptimizerRule(ForcedMaxOutputRowsRule) - extensions.injectPlannerStrategy(MaxPartitionStrategy) + extensions.injectPlannerStrategy(MaxScanStrategy) + + extensions.injectQueryStagePrepRule(FinalStageResourceManager(_)) + extensions.injectQueryStagePrepRule(InjectCustomResourceProfile) } } diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala index af1711ebbe7..c4418c33c44 100644 --- a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala @@ -21,19 +21,21 @@ import org.antlr.v4.runtime._ import org.antlr.v4.runtime.atn.PredictionMode import org.antlr.v4.runtime.misc.{Interval, ParseCancellationException} import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} +import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier} import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.parser.{ParseErrorListener, ParseException, ParserInterface, PostProcessor} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.trees.Origin import org.apache.spark.sql.types.{DataType, StructType} -abstract class KyuubiSparkSQLParserBase extends ParserInterface { +abstract class KyuubiSparkSQLParserBase extends ParserInterface with SQLConfHelper { def delegate: ParserInterface - def astBuilder: KyuubiSparkSQLAstBuilderBase + def astBuilder: KyuubiSparkSQLAstBuilder override def parsePlan(sqlText: String): LogicalPlan = parse(sqlText) { parser => astBuilder.visit(parser.singleStatement()) match { + case optimize: UnparsedPredicateOptimize => + astBuilder.buildOptimizeStatement(optimize, delegate.parseExpression) case plan: LogicalPlan => plan case _ => delegate.parsePlan(sqlText) } @@ -113,7 +115,7 @@ abstract class KyuubiSparkSQLParserBase extends ParserInterface { class SparkKyuubiSparkSQLParser( override val delegate: ParserInterface) extends KyuubiSparkSQLParserBase { - def astBuilder: KyuubiSparkSQLAstBuilderBase = new KyuubiSparkSQLAstBuilder + def astBuilder: KyuubiSparkSQLAstBuilder = new KyuubiSparkSQLAstBuilder } /* Copied from Apache Spark's to avoid dependency on Spark Internals */ diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala new file mode 100644 index 00000000000..32fb9f5ce84 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala @@ -0,0 +1,283 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import scala.annotation.tailrec +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + +import org.apache.spark.{ExecutorAllocationClient, MapOutputTrackerMaster, SparkContext, SparkEnv} +import org.apache.spark.internal.Logging +import org.apache.spark.resource.ResourceProfile +import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SortExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive._ +import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec +import org.apache.spark.sql.execution.exchange.{ENSURE_REQUIREMENTS, ShuffleExchangeExec} + +import org.apache.kyuubi.sql.{KyuubiSQLConf, MarkNumOutputColumnsRule} + +/** + * This rule assumes the final write stage has less cores requirement than previous, otherwise + * this rule would take no effect. + * + * It provide a feature: + * 1. Kill redundant executors before running final write stage + */ +case class FinalStageResourceManager(session: SparkSession) + extends Rule[SparkPlan] with FinalRebalanceStageHelper { + override def apply(plan: SparkPlan): SparkPlan = { + if (!conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED)) { + return plan + } + + if (!MarkNumOutputColumnsRule.isWrite(session, plan)) { + return plan + } + + val sc = session.sparkContext + val dra = sc.getConf.getBoolean("spark.dynamicAllocation.enabled", false) + val coresPerExecutor = sc.getConf.getInt("spark.executor.cores", 1) + val minExecutors = sc.getConf.getInt("spark.dynamicAllocation.minExecutors", 0) + val maxExecutors = sc.getConf.getInt("spark.dynamicAllocation.maxExecutors", Int.MaxValue) + val factor = conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_PARTITION_FACTOR) + val hasImprovementRoom = maxExecutors - 1 > minExecutors * factor + // Fast fail if: + // 1. DRA off + // 2. only work with yarn and k8s + // 3. maxExecutors is not bigger than minExecutors * factor + if (!dra || !sc.schedulerBackend.isInstanceOf[CoarseGrainedSchedulerBackend] || + !hasImprovementRoom) { + return plan + } + + val stageOpt = findFinalRebalanceStage(plan) + if (stageOpt.isEmpty) { + return plan + } + + // It's not safe to kill executors if this plan contains table cache. + // If the executor loses then the rdd would re-compute those partition. + if (hasTableCache(plan) && + conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE)) { + return plan + } + + // TODO: move this to query stage optimizer when updating Spark to 3.5.x + // Since we are in `prepareQueryStage`, the AQE shuffle read has not been applied. + // So we need to apply it by self. 
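+    // Folding the query-stage optimizer rules over the stage materializes the AQE shuffle
+    // read (e.g. an AQEShuffleReadExec with coalesced partition specs), so the real
+    // post-coalesce parallelism of the final write stage can be read out below.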
+ val shuffleRead = queryStageOptimizerRules.foldLeft(stageOpt.get.asInstanceOf[SparkPlan]) { + case (latest, rule) => rule.apply(latest) + } + val (targetCores, stage) = shuffleRead match { + case AQEShuffleReadExec(stage: ShuffleQueryStageExec, partitionSpecs) => + (partitionSpecs.length, stage) + case stage: ShuffleQueryStageExec => + // we can still kill executors if no AQE shuffle read, e.g., `.repartition(2)` + (stage.shuffle.numPartitions, stage) + case _ => + // it should never happen in current Spark, but to be safe do nothing if happens + logWarning("BUG, Please report to Apache Kyuubi community") + return plan + } + // The condition whether inject custom resource profile: + // - target executors < active executors + // - active executors - target executors > min executors + val numActiveExecutors = sc.getExecutorIds().length + val targetExecutors = (math.ceil(targetCores.toFloat / coresPerExecutor) * factor).toInt + .max(1) + val hasBenefits = targetExecutors < numActiveExecutors && + (numActiveExecutors - targetExecutors) > minExecutors + logInfo(s"The snapshot of current executors view, " + + s"active executors: $numActiveExecutors, min executor: $minExecutors, " + + s"target executors: $targetExecutors, has benefits: $hasBenefits") + if (hasBenefits) { + val shuffleId = stage.plan.asInstanceOf[ShuffleExchangeExec].shuffleDependency.shuffleId + val numReduce = stage.plan.asInstanceOf[ShuffleExchangeExec].numPartitions + // Now, there is only a final rebalance stage waiting to execute and all tasks of previous + // stage are finished. Kill redundant existed executors eagerly so the tasks of final + // stage can be centralized scheduled. + killExecutors(sc, targetExecutors, shuffleId, numReduce) + } + + plan + } + + /** + * The priority of kill executors follow: + * 1. kill executor who is younger than other (The older the JIT works better) + * 2. kill executor who produces less shuffle data first + */ + private def findExecutorToKill( + sc: SparkContext, + targetExecutors: Int, + shuffleId: Int, + numReduce: Int): Seq[String] = { + val tracker = SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster] + val shuffleStatusOpt = tracker.shuffleStatuses.get(shuffleId) + if (shuffleStatusOpt.isEmpty) { + return Seq.empty + } + val shuffleStatus = shuffleStatusOpt.get + val executorToBlockSize = new mutable.HashMap[String, Long] + shuffleStatus.withMapStatuses { mapStatus => + mapStatus.foreach { status => + var i = 0 + var sum = 0L + while (i < numReduce) { + sum += status.getSizeForBlock(i) + i += 1 + } + executorToBlockSize.getOrElseUpdate(status.location.executorId, sum) + } + } + + val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend] + val executorsWithRegistrationTs = backend.getExecutorsWithRegistrationTs() + val existedExecutors = executorsWithRegistrationTs.keys.toSet + val expectedNumExecutorToKill = existedExecutors.size - targetExecutors + if (expectedNumExecutorToKill < 1) { + return Seq.empty + } + + val executorIdsToKill = new ArrayBuffer[String]() + // We first kill executor who does not hold shuffle block. It would happen because + // the last stage is running fast and finished in a short time. The existed executors are + // from previous stages that have not been killed by DRA, so we can not find it by tracking + // shuffle status. + // We should evict executors by their alive time first and retain all of executors which + // have better locality for shuffle block. 
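As a worked sketch of the sizing used by this rule (all numbers are illustrative, not defaults), tying the target computed in apply() to the kill budget used in this method:

val coresPerExecutor = 4   // spark.executor.cores
val factor = 1.5           // FINAL_WRITE_STAGE_PARTITION_FACTOR
val targetCores = 8        // partitions of the final rebalance stage after the AQE shuffle read
val targetExecutors = (math.ceil(targetCores.toFloat / coresPerExecutor) * factor).toInt.max(1)
// targetExecutors == 3; with 20 registered executors the kill budget is 20 - 3 = 17,
// spent first on executors that hold no shuffle blocks (youngest first), then on the
// remaining executors in ascending order of shuffle-block size.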
+ executorsWithRegistrationTs.toSeq.sortBy(_._2).foreach { case (id, _) => + if (executorIdsToKill.length < expectedNumExecutorToKill && + !executorToBlockSize.contains(id)) { + executorIdsToKill.append(id) + } + } + + // Evict the rest executors according to the shuffle block size + executorToBlockSize.toSeq.sortBy(_._2).foreach { case (id, _) => + if (executorIdsToKill.length < expectedNumExecutorToKill && existedExecutors.contains(id)) { + executorIdsToKill.append(id) + } + } + + executorIdsToKill.toSeq + } + + private def killExecutors( + sc: SparkContext, + targetExecutors: Int, + shuffleId: Int, + numReduce: Int): Unit = { + val executorAllocationClient = sc.schedulerBackend.asInstanceOf[ExecutorAllocationClient] + + val executorsToKill = + if (conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL)) { + executorAllocationClient.getExecutorIds() + } else { + findExecutorToKill(sc, targetExecutors, shuffleId, numReduce) + } + logInfo(s"Request to kill executors, total count ${executorsToKill.size}, " + + s"[${executorsToKill.mkString(", ")}].") + if (executorsToKill.isEmpty) { + return + } + + // Note, `SparkContext#killExecutors` does not allow with DRA enabled, + // see `https://github.com/apache/spark/pull/20604`. + // It may cause the status in `ExecutorAllocationManager` inconsistent with + // `CoarseGrainedSchedulerBackend` for a while. But it should be synchronous finally. + // + // We should adjust target num executors, otherwise `YarnAllocator` might re-request original + // target executors if DRA has not updated target executors yet. + // Note, DRA would re-adjust executors if there are more tasks to be executed, so we are safe. + // + // * We kill executor + // * YarnAllocator re-request target executors + // * DRA can not release executors since they are new added + // ----------------------------------------------------------------> timeline + executorAllocationClient.killExecutors( + executorIds = executorsToKill, + adjustTargetNumExecutors = true, + countFailures = false, + force = false) + + FinalStageResourceManager.getAdjustedTargetExecutors(sc) + .filter(_ < targetExecutors).foreach { adjustedExecutors => + val delta = targetExecutors - adjustedExecutors + logInfo(s"Target executors after kill ($adjustedExecutors) is lower than required " + + s"($targetExecutors). 
Requesting $delta additional executor(s).") + executorAllocationClient.requestExecutors(delta) + } + } + + @transient private val queryStageOptimizerRules: Seq[Rule[SparkPlan]] = Seq( + OptimizeSkewInRebalancePartitions, + CoalesceShufflePartitions(session), + OptimizeShuffleWithLocalRead) +} + +object FinalStageResourceManager extends Logging { + + private[sql] def getAdjustedTargetExecutors(sc: SparkContext): Option[Int] = { + sc.schedulerBackend match { + case schedulerBackend: CoarseGrainedSchedulerBackend => + try { + val field = classOf[CoarseGrainedSchedulerBackend] + .getDeclaredField("requestedTotalExecutorsPerResourceProfile") + field.setAccessible(true) + schedulerBackend.synchronized { + val requestedTotalExecutorsPerResourceProfile = + field.get(schedulerBackend).asInstanceOf[mutable.HashMap[ResourceProfile, Int]] + val defaultRp = sc.resourceProfileManager.defaultResourceProfile + requestedTotalExecutorsPerResourceProfile.get(defaultRp) + } + } catch { + case e: Exception => + logWarning("Failed to get requestedTotalExecutors of Default ResourceProfile", e) + None + } + case _ => None + } + } +} + +trait FinalRebalanceStageHelper extends AdaptiveSparkPlanHelper { + @tailrec + final protected def findFinalRebalanceStage(plan: SparkPlan): Option[ShuffleQueryStageExec] = { + plan match { + case p: ProjectExec => findFinalRebalanceStage(p.child) + case f: FilterExec => findFinalRebalanceStage(f.child) + case s: SortExec if !s.global => findFinalRebalanceStage(s.child) + case stage: ShuffleQueryStageExec + if stage.isMaterialized && stage.mapStats.isDefined && + stage.plan.isInstanceOf[ShuffleExchangeExec] && + stage.plan.asInstanceOf[ShuffleExchangeExec].shuffleOrigin != ENSURE_REQUIREMENTS => + Some(stage) + case _ => None + } + } + + final protected def hasTableCache(plan: SparkPlan): Boolean = { + find(plan) { + case _: InMemoryTableScanExec => true + case _ => false + }.isDefined + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala new file mode 100644 index 00000000000..30c042b2a2c --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
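To make the matching in findFinalRebalanceStage above concrete, a few statement shapes and whether a final rebalance stage would be found for them; the table names are placeholders and the expectations mirror the InjectResourceProfileSuite added later in this patch:

spark.sql("INSERT INTO TABLE t SELECT /*+ REBALANCE */ * FROM src")  // rebalance shuffle on top: matched
spark.sql("INSERT INTO TABLE t VALUES (1, 'a')")                     // no shuffle at all: not matched
spark.sql("SELECT * FROM a JOIN b ON a.k = b.k")                     // only ENSURE_REQUIREMENTS shuffles: not matched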
+ */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{CustomResourceProfileExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive._ + +import org.apache.kyuubi.sql.{KyuubiSQLConf, MarkNumOutputColumnsRule} + +/** + * Inject custom resource profile for final write stage, so we can specify custom + * executor resource configs. + */ +case class InjectCustomResourceProfile(session: SparkSession) + extends Rule[SparkPlan] with FinalRebalanceStageHelper { + override def apply(plan: SparkPlan): SparkPlan = { + if (!conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED)) { + return plan + } + + if (!MarkNumOutputColumnsRule.isWrite(session, plan)) { + return plan + } + + val stage = findFinalRebalanceStage(plan) + if (stage.isEmpty) { + return plan + } + + // TODO: Ideally, We can call `CoarseGrainedSchedulerBackend.requestTotalExecutors` eagerly + // to reduce the task submit pending time, but it may lose task locality. + // + // By default, it would request executors when catch stage submit event. + injectCustomResourceProfile(plan, stage.get.id) + } + + private def injectCustomResourceProfile(plan: SparkPlan, id: Int): SparkPlan = { + plan match { + case stage: ShuffleQueryStageExec if stage.id == id => + CustomResourceProfileExec(stage) + case _ => plan.mapChildren(child => injectCustomResourceProfile(child, id)) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala new file mode 100644 index 00000000000..3698140fbd0 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution + +import org.apache.spark.network.util.{ByteUnit, JavaUtils} +import org.apache.spark.rdd.RDD +import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfileBuilder} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.expressions.{Attribute, SortOrder} +import org.apache.spark.sql.catalyst.plans.physical.Partitioning +import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} +import org.apache.spark.sql.vectorized.ColumnarBatch +import org.apache.spark.util.Utils + +import org.apache.kyuubi.sql.KyuubiSQLConf._ + +/** + * This node wraps the final executed plan and inject custom resource profile to the RDD. 
+ * It assumes that, the produced RDD would create the `ResultStage` in `DAGScheduler`, + * so it makes resource isolation between previous and final stage. + * + * Note that, Spark does not support config `minExecutors` for each resource profile. + * Which means, it would retain `minExecutors` for each resource profile. + * So, suggest set `spark.dynamicAllocation.minExecutors` to 0 if enable this feature. + */ +case class CustomResourceProfileExec(child: SparkPlan) extends UnaryExecNode { + override def output: Seq[Attribute] = child.output + override def outputPartitioning: Partitioning = child.outputPartitioning + override def outputOrdering: Seq[SortOrder] = child.outputOrdering + override def supportsColumnar: Boolean = child.supportsColumnar + override def supportsRowBased: Boolean = child.supportsRowBased + override protected def doCanonicalize(): SparkPlan = child.canonicalized + + private val executorCores = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_CORES).getOrElse( + sparkContext.getConf.getInt("spark.executor.cores", 1)) + private val executorMemory = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_MEMORY).getOrElse( + sparkContext.getConf.get("spark.executor.memory", "2G")) + private val executorMemoryOverhead = + conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_MEMORY_OVERHEAD) + .getOrElse(sparkContext.getConf.get("spark.executor.memoryOverhead", "1G")) + private val executorOffHeapMemory = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_OFF_HEAP_MEMORY) + + override lazy val metrics: Map[String, SQLMetric] = { + val base = Map( + "executorCores" -> SQLMetrics.createMetric(sparkContext, "executor cores"), + "executorMemory" -> SQLMetrics.createMetric(sparkContext, "executor memory (MiB)"), + "executorMemoryOverhead" -> SQLMetrics.createMetric( + sparkContext, + "executor memory overhead (MiB)")) + val addition = executorOffHeapMemory.map(_ => + "executorOffHeapMemory" -> + SQLMetrics.createMetric(sparkContext, "executor off heap memory (MiB)")).toMap + base ++ addition + } + + private def wrapResourceProfile[T](rdd: RDD[T]): RDD[T] = { + if (Utils.isTesting) { + // do nothing for local testing + return rdd + } + + metrics("executorCores") += executorCores + metrics("executorMemory") += JavaUtils.byteStringAs(executorMemory, ByteUnit.MiB) + metrics("executorMemoryOverhead") += JavaUtils.byteStringAs( + executorMemoryOverhead, + ByteUnit.MiB) + executorOffHeapMemory.foreach(m => + metrics("executorOffHeapMemory") += JavaUtils.byteStringAs(m, ByteUnit.MiB)) + + val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY) + SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.values.toSeq) + + val resourceProfileBuilder = new ResourceProfileBuilder() + val executorResourceRequests = new ExecutorResourceRequests() + executorResourceRequests.cores(executorCores) + executorResourceRequests.memory(executorMemory) + executorResourceRequests.memoryOverhead(executorMemoryOverhead) + executorOffHeapMemory.foreach(executorResourceRequests.offHeapMemory) + resourceProfileBuilder.require(executorResourceRequests) + rdd.withResources(resourceProfileBuilder.build()) + rdd + } + + override protected def doExecute(): RDD[InternalRow] = { + val rdd = child.execute() + wrapResourceProfile(rdd) + } + + override protected def doExecuteColumnar(): RDD[ColumnarBatch] = { + val rdd = child.executeColumnar() + wrapResourceProfile(rdd) + } + + override protected def withNewChildInternal(newChild: SparkPlan): SparkPlan = { + this.copy(child = newChild) + } +} diff --git 
a/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala new file mode 100644 index 00000000000..4b9991ef6f2 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.scalatest.time.{Minutes, Span} + +import org.apache.kyuubi.sql.KyuubiSQLConf +import org.apache.kyuubi.tags.SparkLocalClusterTest + +@SparkLocalClusterTest +class FinalStageResourceManagerSuite extends KyuubiSparkSQLExtensionTest { + + override def sparkConf(): SparkConf = { + // It is difficult to run spark in local-cluster mode when spark.testing is set. + sys.props.remove("spark.testing") + + super.sparkConf().set("spark.master", "local-cluster[3, 1, 1024]") + .set("spark.dynamicAllocation.enabled", "true") + .set("spark.dynamicAllocation.initialExecutors", "3") + .set("spark.dynamicAllocation.minExecutors", "1") + .set("spark.dynamicAllocation.shuffleTracking.enabled", "true") + .set(KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key, "true") + .set(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED.key, "true") + } + + test("[KYUUBI #5136][Bug] Final Stage hangs forever") { + // Prerequisite to reproduce the bug: + // 1. Dynamic allocation is enabled. + // 2. Dynamic allocation min executors is 1. + // 3. target executors < active executors. + // 4. No active executor is left after FinalStageResourceManager killed executors. + // This is possible because FinalStageResourceManager retained executors may already be + // requested to be killed but not died yet. + // 5. Final Stage required executors is 1. 
+ withSQLConf( + (KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL.key, "true")) { + withTable("final_stage") { + eventually(timeout(Span(10, Minutes))) { + sql( + "CREATE TABLE final_stage AS SELECT id, count(*) as num FROM (SELECT 0 id) GROUP BY id") + } + assert(FinalStageResourceManager.getAdjustedTargetExecutors(spark.sparkContext).get == 1) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala new file mode 100644 index 00000000000..b0767b18708 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent} +import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate + +import org.apache.kyuubi.sql.KyuubiSQLConf + +class InjectResourceProfileSuite extends KyuubiSparkSQLExtensionTest { + private def checkCustomResourceProfile(sqlString: String, exists: Boolean): Unit = { + @volatile var lastEvent: SparkListenerSQLAdaptiveExecutionUpdate = null + val listener = new SparkListener { + override def onOtherEvent(event: SparkListenerEvent): Unit = { + event match { + case e: SparkListenerSQLAdaptiveExecutionUpdate => lastEvent = e + case _ => + } + } + } + + spark.sparkContext.addSparkListener(listener) + try { + sql(sqlString).collect() + spark.sparkContext.listenerBus.waitUntilEmpty() + assert(lastEvent != null) + var current = lastEvent.sparkPlanInfo + var shouldStop = false + while (!shouldStop) { + if (current.nodeName != "CustomResourceProfile") { + if (current.children.isEmpty) { + assert(!exists) + shouldStop = true + } else { + current = current.children.head + } + } else { + assert(exists) + shouldStop = true + } + } + } finally { + spark.sparkContext.removeSparkListener(listener) + } + } + + test("Inject resource profile") { + withTable("t") { + withSQLConf( + "spark.sql.adaptive.forceApply" -> "true", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key -> "true", + KyuubiSQLConf.FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED.key -> "true") { + + sql("CREATE TABLE t (c1 int, c2 string) USING PARQUET") + + checkCustomResourceProfile("INSERT INTO TABLE t VALUES(1, 'a')", false) + checkCustomResourceProfile("SELECT 1", false) + checkCustomResourceProfile( + "INSERT INTO TABLE t SELECT /*+ rebalance */ * FROM VALUES(1, 'a')", + true) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/ZorderSuite.scala 
b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/ZorderSuite.scala index 90fc17e2430..a08366f1d4a 100644 --- a/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/ZorderSuite.scala +++ b/extensions/spark/kyuubi-extension-spark-3-3/src/test/scala/org/apache/spark/sql/ZorderSuite.scala @@ -17,13 +17,14 @@ package org.apache.spark.sql +import org.apache.spark.sql.catalyst.parser.ParserInterface import org.apache.spark.sql.catalyst.plans.logical.{RebalancePartitions, Sort} import org.apache.spark.sql.internal.SQLConf -import org.apache.kyuubi.sql.KyuubiSQLConf +import org.apache.kyuubi.sql.{KyuubiSQLConf, SparkKyuubiSparkSQLParser} import org.apache.kyuubi.sql.zorder.Zorder -trait ZorderWithCodegenEnabledSuiteBase33 extends ZorderWithCodegenEnabledSuiteBase { +trait ZorderSuiteSpark33 extends ZorderSuiteBase { test("Add rebalance before zorder") { Seq("true" -> false, "false" -> true).foreach { case (useOriginalOrdering, zorder) => @@ -106,6 +107,18 @@ trait ZorderWithCodegenEnabledSuiteBase33 extends ZorderWithCodegenEnabledSuiteB } } -class ZorderWithCodegenEnabledSuite extends ZorderWithCodegenEnabledSuiteBase33 {} +trait ParserSuite { self: ZorderSuiteBase => + override def createParser: ParserInterface = { + new SparkKyuubiSparkSQLParser(spark.sessionState.sqlParser) + } +} + +class ZorderWithCodegenEnabledSuite + extends ZorderWithCodegenEnabledSuiteBase + with ZorderSuiteSpark33 + with ParserSuite {} -class ZorderWithCodegenDisabledSuite extends ZorderWithCodegenEnabledSuiteBase33 {} +class ZorderWithCodegenDisabledSuite + extends ZorderWithCodegenDisabledSuiteBase + with ZorderSuiteSpark33 + with ParserSuite {} diff --git a/extensions/spark/kyuubi-spark-connector-kudu/pom.xml b/extensions/spark/kyuubi-extension-spark-3-4/pom.xml similarity index 64% rename from extensions/spark/kyuubi-spark-connector-kudu/pom.xml rename to extensions/spark/kyuubi-extension-spark-3-4/pom.xml index 97356cd9332..ee5b5f1558a 100644 --- a/extensions/spark/kyuubi-spark-connector-kudu/pom.xml +++ b/extensions/spark/kyuubi-extension-spark-3-4/pom.xml @@ -21,13 +21,13 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-spark-connector-kudu_2.12 + kyuubi-extension-spark-3-4_${scala.binary.version} jar - Kyuubi Spark Kudu Connector + Kyuubi Dev Spark Extensions (for Spark 3.4) https://kyuubi.apache.org/ @@ -38,20 +38,14 @@ - org.apache.logging.log4j - log4j-api - provided - - - - org.apache.logging.log4j - log4j-core + org.apache.spark + spark-sql_${scala.binary.version} provided org.apache.spark - spark-sql_${scala.binary.version} + spark-hive_${scala.binary.version} provided @@ -62,48 +56,45 @@ - org.apache.kudu - kudu-client - - - - org.apache.spark - spark-catalyst_${scala.binary.version} - test-jar + org.apache.kyuubi + kyuubi-download + ${project.version} + pom test - org.scalatestplus - scalacheck-1-17_${scala.binary.version} + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + test-jar test - com.dimafeng - testcontainers-scala-scalatest_${scala.binary.version} + org.apache.spark + spark-core_${scala.binary.version} + test-jar test org.apache.spark - spark-sql_${scala.binary.version} - ${spark.version} + spark-catalyst_${scala.binary.version} test-jar test - org.apache.kyuubi - kyuubi-common_${scala.binary.version} - ${project.version} + org.scalatestplus + scalacheck-1-17_${scala.binary.version} test - org.apache.kyuubi - kyuubi-common_${scala.binary.version} - 
${project.version} + org.apache.spark + spark-sql_${scala.binary.version} + ${spark.version} test-jar test @@ -136,16 +127,55 @@ jakarta.xml.bind-api test + + + org.apache.logging.log4j + log4j-slf4j-impl + test + - org.apache.maven.plugins - maven-dependency-plugin + org.codehaus.mojo + build-helper-maven-plugin + + + regex-property + + regex-property + + + spark.home + ${project.basedir}/../../../externals/kyuubi-download/target/${spark.archive.name} + (.+)\.tgz + $1 + + + + + + org.scalatest + scalatest-maven-plugin + + + + ${spark.home} + ${scala.binary.version} + + + + + org.antlr + antlr4-maven-plugin - true + true + ${project.basedir}/src/main/antlr4 @@ -156,43 +186,9 @@ false - org.apache.kudu:kudu-client - com.stumbleupon:async + org.apache.kyuubi:* - - - org.apache.kudu:kudu-client - - META-INF/maven/** - META-INF/native/** - META-INF/native-image/** - MANIFEST.MF - LICENSE - LICENSE.txt - NOTICE - NOTICE.txt - *.properties - **/*.proto - - - - - - org.apache.kudu - ${kyuubi.shade.packageName}.org.apache.kudu - - org.apache.kudu.** - - - - com.stumbleupon:async - ${kyuubi.shade.packageName}.com.stumbleupon.async - - com.stumbleupon.async.** - - - @@ -203,20 +199,6 @@ - - - org.apache.maven.plugins - maven-jar-plugin - - - prepare-test-jar - - test-jar - - test-compile - - - target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 b/extensions/spark/kyuubi-extension-spark-3-4/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 new file mode 100644 index 00000000000..e52b7f5cfeb --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +grammar KyuubiSparkSQL; + +@members { + /** + * Verify whether current token is a valid decimal token (which contains dot). + * Returns true if the character that follows the token is not a digit or letter or underscore. + * + * For example: + * For char stream "2.3", "2." is not a valid decimal token, because it is followed by digit '3'. + * For char stream "2.3_", "2.3" is not a valid decimal token, because it is followed by '_'. + * For char stream "2.3W", "2.3" is not a valid decimal token, because it is followed by 'W'. + * For char stream "12.0D 34.E2+0.12 " 12.0D is a valid decimal token because it is followed + * by a space. 34.E2 is a valid decimal token because it is followed by symbol '+' + * which is not a digit or letter or underscore. 
+ */ + public boolean isValidDecimal() { + int nextChar = _input.LA(1); + if (nextChar >= 'A' && nextChar <= 'Z' || nextChar >= '0' && nextChar <= '9' || + nextChar == '_') { + return false; + } else { + return true; + } + } + } + +tokens { + DELIMITER +} + +singleStatement + : statement EOF + ; + +statement + : OPTIMIZE multipartIdentifier whereClause? zorderClause #optimizeZorder + | .*? #passThrough + ; + +whereClause + : WHERE partitionPredicate = predicateToken + ; + +zorderClause + : ZORDER BY order+=multipartIdentifier (',' order+=multipartIdentifier)* + ; + +// We don't have an expression rule in our grammar here, so we just grab the tokens and defer +// parsing them to later. +predicateToken + : .+? + ; + +multipartIdentifier + : parts+=identifier ('.' parts+=identifier)* + ; + +identifier + : strictIdentifier + ; + +strictIdentifier + : IDENTIFIER #unquotedIdentifier + | quotedIdentifier #quotedIdentifierAlternative + | nonReserved #unquotedIdentifier + ; + +quotedIdentifier + : BACKQUOTED_IDENTIFIER + ; + +nonReserved + : AND + | BY + | FALSE + | DATE + | INTERVAL + | OPTIMIZE + | OR + | TABLE + | TIMESTAMP + | TRUE + | WHERE + | ZORDER + ; + +AND: 'AND'; +BY: 'BY'; +FALSE: 'FALSE'; +DATE: 'DATE'; +INTERVAL: 'INTERVAL'; +OPTIMIZE: 'OPTIMIZE'; +OR: 'OR'; +TABLE: 'TABLE'; +TIMESTAMP: 'TIMESTAMP'; +TRUE: 'TRUE'; +WHERE: 'WHERE'; +ZORDER: 'ZORDER'; + +MINUS: '-'; + +BIGINT_LITERAL + : DIGIT+ 'L' + ; + +SMALLINT_LITERAL + : DIGIT+ 'S' + ; + +TINYINT_LITERAL + : DIGIT+ 'Y' + ; + +INTEGER_VALUE + : DIGIT+ + ; + +DECIMAL_VALUE + : DIGIT+ EXPONENT + | DECIMAL_DIGITS EXPONENT? {isValidDecimal()}? + ; + +DOUBLE_LITERAL + : DIGIT+ EXPONENT? 'D' + | DECIMAL_DIGITS EXPONENT? 'D' {isValidDecimal()}? + ; + +BIGDECIMAL_LITERAL + : DIGIT+ EXPONENT? 'BD' + | DECIMAL_DIGITS EXPONENT? 'BD' {isValidDecimal()}? + ; + +BACKQUOTED_IDENTIFIER + : '`' ( ~'`' | '``' )* '`' + ; + +IDENTIFIER + : (LETTER | DIGIT | '_')+ + ; + +fragment DECIMAL_DIGITS + : DIGIT+ '.' DIGIT* + | '.' DIGIT+ + ; + +fragment EXPONENT + : 'E' [+-]? DIGIT+ + ; + +fragment DIGIT + : [0-9] + ; + +fragment LETTER + : [A-Z] + ; + +SIMPLE_COMMENT + : '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN) + ; + +BRACKETED_COMMENT + : '/*' .*? '*/' -> channel(HIDDEN) + ; + +WS : [ \r\n\t]+ -> channel(HIDDEN) + ; + +// Catch-all for anything we can't recognize. +// We use this to be able to ignore and recover all the text +// when splitting statements with DelimiterLexer +UNRECOGNIZED + : . + ; diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/DropIgnoreNonexistent.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/DropIgnoreNonexistent.scala new file mode 100644 index 00000000000..e33632b8b30 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/DropIgnoreNonexistent.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
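For reference, statements accepted by the optimizeZorder rule above, issued through a session that registers the extension; the table, partition predicate, and column names are hypothetical, and any other statement falls through the passThrough alternative to Spark's own parser:

spark.sql("OPTIMIZE db1.events WHERE day = '2023-08-01' ZORDER BY user_id, item_id")
spark.sql("OPTIMIZE events ZORDER BY user_id")   // the WHERE clause is optional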
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.sql + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.analysis.{UnresolvedFunctionName, UnresolvedRelation} +import org.apache.spark.sql.catalyst.plans.logical.{DropFunction, DropNamespace, LogicalPlan, NoopCommand, UncacheTable} +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.command.{AlterTableDropPartitionCommand, DropTableCommand} + +import org.apache.kyuubi.sql.KyuubiSQLConf._ + +case class DropIgnoreNonexistent(session: SparkSession) extends Rule[LogicalPlan] { + + override def apply(plan: LogicalPlan): LogicalPlan = { + if (conf.getConf(DROP_IGNORE_NONEXISTENT)) { + plan match { + case i @ AlterTableDropPartitionCommand(_, _, false, _, _) => + i.copy(ifExists = true) + case i @ DropTableCommand(_, false, _, _) => + i.copy(ifExists = true) + case i @ DropNamespace(_, false, _) => + i.copy(ifExists = true) + case UncacheTable(u: UnresolvedRelation, false, _) => + NoopCommand("UNCACHE TABLE", u.multipartIdentifier) + case DropFunction(u: UnresolvedFunctionName, false) => + NoopCommand("DROP FUNCTION", u.multipartIdentifier) + case _ => plan + } + } else { + plan + } + } + +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InferRebalanceAndSortOrders.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InferRebalanceAndSortOrders.scala new file mode 100644 index 00000000000..fcbf5c0a122 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InferRebalanceAndSortOrders.scala @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import scala.annotation.tailrec + +import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeSet, Expression, NamedExpression, UnaryExpression} +import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys +import org.apache.spark.sql.catalyst.plans.{FullOuter, Inner, LeftAnti, LeftOuter, LeftSemi, RightOuter} +import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Filter, LogicalPlan, Project, Sort, SubqueryAlias, View} + +/** + * Infer the columns for Rebalance and Sort to improve the compression ratio. 
+ * + * For example + * {{{ + * INSERT INTO TABLE t PARTITION(p='a') + * SELECT * FROM t1 JOIN t2 on t1.c1 = t2.c1 + * }}} + * the inferred columns are: t1.c1 + */ +object InferRebalanceAndSortOrders { + + type PartitioningAndOrdering = (Seq[Expression], Seq[Expression]) + + private def getAliasMap(named: Seq[NamedExpression]): Map[Expression, Attribute] = { + @tailrec + def throughUnary(e: Expression): Expression = e match { + case u: UnaryExpression if u.deterministic => + throughUnary(u.child) + case _ => e + } + + named.flatMap { + case a @ Alias(child, _) => + Some((throughUnary(child).canonicalized, a.toAttribute)) + case _ => None + }.toMap + } + + def infer(plan: LogicalPlan): Option[PartitioningAndOrdering] = { + def candidateKeys( + input: LogicalPlan, + output: AttributeSet = AttributeSet.empty): Option[PartitioningAndOrdering] = { + input match { + case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, _, _, _, _, _) => + joinType match { + case LeftSemi | LeftAnti | LeftOuter => Some((leftKeys, leftKeys)) + case RightOuter => Some((rightKeys, rightKeys)) + case Inner | FullOuter => + if (output.isEmpty) { + Some((leftKeys ++ rightKeys, leftKeys ++ rightKeys)) + } else { + assert(leftKeys.length == rightKeys.length) + val keys = leftKeys.zip(rightKeys).flatMap { case (left, right) => + if (left.references.subsetOf(output)) { + Some(left) + } else if (right.references.subsetOf(output)) { + Some(right) + } else { + None + } + } + Some((keys, keys)) + } + case _ => None + } + case agg: Aggregate => + val aliasMap = getAliasMap(agg.aggregateExpressions) + Some(( + agg.groupingExpressions.map(p => aliasMap.getOrElse(p.canonicalized, p)), + agg.groupingExpressions.map(o => aliasMap.getOrElse(o.canonicalized, o)))) + case s: Sort => Some((s.order.map(_.child), s.order.map(_.child))) + case p: Project => + val aliasMap = getAliasMap(p.projectList) + candidateKeys(p.child, p.references).map { case (partitioning, ordering) => + ( + partitioning.map(p => aliasMap.getOrElse(p.canonicalized, p)), + ordering.map(o => aliasMap.getOrElse(o.canonicalized, o))) + } + case f: Filter => candidateKeys(f.child, output) + case s: SubqueryAlias => candidateKeys(s.child, output) + case v: View => candidateKeys(v.child, output) + + case _ => None + } + } + + candidateKeys(plan).map { case (partitioning, ordering) => + ( + partitioning.filter(_.references.subsetOf(plan.outputSet)), + ordering.filter(_.references.subsetOf(plan.outputSet))) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InsertShuffleNodeBeforeJoin.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InsertShuffleNodeBeforeJoin.scala new file mode 100644 index 00000000000..1a02e8c1e67 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/InsertShuffleNodeBeforeJoin.scala @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
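Complementing the join example in the scaladoc above, a sketch of what infer returns for an aggregate source; the table s and its columns are hypothetical:

val plan = spark.sql("SELECT c1, p, count(*) AS cnt FROM s GROUP BY c1, p")
  .queryExecution.analyzed
val inferred = org.apache.kyuubi.sql.InferRebalanceAndSortOrders.infer(plan)
// Both the rebalance keys and the sort keys come back as (c1, p), i.e. the grouping
// expressions resolved through the alias map.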
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.catalyst.plans.physical.Distribution +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{SortExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive.QueryStageExec +import org.apache.spark.sql.execution.aggregate.BaseAggregateExec +import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchangeExec} +import org.apache.spark.sql.execution.joins.{ShuffledHashJoinExec, SortMergeJoinExec} +import org.apache.spark.sql.internal.SQLConf + +import org.apache.kyuubi.sql.KyuubiSQLConf._ + +/** + * Insert shuffle node before join if it doesn't exist to make `OptimizeSkewedJoin` works. + */ +object InsertShuffleNodeBeforeJoin extends Rule[SparkPlan] { + + override def apply(plan: SparkPlan): SparkPlan = { + // this rule has no meaning without AQE + if (!conf.getConf(FORCE_SHUFFLE_BEFORE_JOIN) || + !conf.getConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED)) { + return plan + } + + val newPlan = insertShuffleBeforeJoin(plan) + if (plan.fastEquals(newPlan)) { + plan + } else { + // make sure the output partitioning and ordering will not be broken. + KyuubiEnsureRequirements.apply(newPlan) + } + } + + // Since spark 3.3, insertShuffleBeforeJoin shouldn't be applied if join is skewed. + private def insertShuffleBeforeJoin(plan: SparkPlan): SparkPlan = plan transformUp { + case smj @ SortMergeJoinExec(_, _, _, _, l, r, isSkewJoin) if !isSkewJoin => + smj.withNewChildren(checkAndInsertShuffle(smj.requiredChildDistribution.head, l) :: + checkAndInsertShuffle(smj.requiredChildDistribution(1), r) :: Nil) + + case shj: ShuffledHashJoinExec if !shj.isSkewJoin => + if (!shj.left.isInstanceOf[Exchange] && !shj.right.isInstanceOf[Exchange]) { + shj.withNewChildren(withShuffleExec(shj.requiredChildDistribution.head, shj.left) :: + withShuffleExec(shj.requiredChildDistribution(1), shj.right) :: Nil) + } else if (!shj.left.isInstanceOf[Exchange]) { + shj.withNewChildren( + withShuffleExec(shj.requiredChildDistribution.head, shj.left) :: shj.right :: Nil) + } else if (!shj.right.isInstanceOf[Exchange]) { + shj.withNewChildren( + shj.left :: withShuffleExec(shj.requiredChildDistribution(1), shj.right) :: Nil) + } else { + shj + } + } + + private def checkAndInsertShuffle( + distribution: Distribution, + child: SparkPlan): SparkPlan = child match { + case SortExec(_, _, _: Exchange, _) => + child + case SortExec(_, _, _: QueryStageExec, _) => + child + case sort @ SortExec(_, _, agg: BaseAggregateExec, _) => + sort.withNewChildren(withShuffleExec(distribution, agg) :: Nil) + case _ => + withShuffleExec(distribution, child) + } + + private def withShuffleExec(distribution: Distribution, child: SparkPlan): SparkPlan = { + val numPartitions = distribution.requiredNumPartitions + .getOrElse(conf.numShufflePartitions) + ShuffleExchangeExec(distribution.createPartitioning(numPartitions), child) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiEnsureRequirements.scala 
b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiEnsureRequirements.scala new file mode 100644 index 00000000000..a17e0a4652b --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiEnsureRequirements.scala @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.catalyst.expressions.SortOrder +import org.apache.spark.sql.catalyst.plans.physical.{BroadcastDistribution, Distribution, UnspecifiedDistribution} +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{SortExec, SparkPlan} +import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchangeExec} + +/** + * Copy from Apache Spark `EnsureRequirements` + * 1. remove reorder join predicates + * 2. remove shuffle pruning + */ +object KyuubiEnsureRequirements extends Rule[SparkPlan] { + private def ensureDistributionAndOrdering(operator: SparkPlan): SparkPlan = { + val requiredChildDistributions: Seq[Distribution] = operator.requiredChildDistribution + val requiredChildOrderings: Seq[Seq[SortOrder]] = operator.requiredChildOrdering + var children: Seq[SparkPlan] = operator.children + assert(requiredChildDistributions.length == children.length) + assert(requiredChildOrderings.length == children.length) + + // Ensure that the operator's children satisfy their output distribution requirements. + children = children.zip(requiredChildDistributions).map { + case (child, distribution) if child.outputPartitioning.satisfies(distribution) => + child + case (child, BroadcastDistribution(mode)) => + BroadcastExchangeExec(mode, child) + case (child, distribution) => + val numPartitions = distribution.requiredNumPartitions + .getOrElse(conf.numShufflePartitions) + ShuffleExchangeExec(distribution.createPartitioning(numPartitions), child) + } + + // Get the indexes of children which have specified distribution requirements and need to have + // same number of partitions. + val childrenIndexes = requiredChildDistributions.zipWithIndex.filter { + case (UnspecifiedDistribution, _) => false + case (_: BroadcastDistribution, _) => false + case _ => true + }.map(_._2) + + val childrenNumPartitions = + childrenIndexes.map(children(_).outputPartitioning.numPartitions).toSet + + if (childrenNumPartitions.size > 1) { + // Get the number of partitions which is explicitly required by the distributions. 
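A usage sketch for the pair of rules above; the flag name is the one declared in KyuubiSQLConf later in this patch, the join hint and table names are placeholders, and the rule is a no-op unless AQE is also enabled:

spark.conf.set("spark.sql.adaptive.enabled", "true")
spark.conf.set("spark.sql.optimizer.forceShuffleBeforeJoin.enabled", "true")
// If the sort-merge join below lacks a shuffle on either side, InsertShuffleNodeBeforeJoin
// adds one and KyuubiEnsureRequirements re-establishes distribution and ordering.
spark.sql("SELECT /*+ SHUFFLE_MERGE(b) */ * FROM a JOIN b ON a.k = b.k").collect()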
+ val requiredNumPartitions = { + val numPartitionsSet = childrenIndexes.flatMap { + index => requiredChildDistributions(index).requiredNumPartitions + }.toSet + assert( + numPartitionsSet.size <= 1, + s"$operator have incompatible requirements of the number of partitions for its children") + numPartitionsSet.headOption + } + + // If there are non-shuffle children that satisfy the required distribution, we have + // some tradeoffs when picking the expected number of shuffle partitions: + // 1. We should avoid shuffling these children. + // 2. We should have a reasonable parallelism. + val nonShuffleChildrenNumPartitions = + childrenIndexes.map(children).filterNot(_.isInstanceOf[ShuffleExchangeExec]) + .map(_.outputPartitioning.numPartitions) + val expectedChildrenNumPartitions = + if (nonShuffleChildrenNumPartitions.nonEmpty) { + if (nonShuffleChildrenNumPartitions.length == childrenIndexes.length) { + // Here we pick the max number of partitions among these non-shuffle children. + nonShuffleChildrenNumPartitions.max + } else { + // Here we pick the max number of partitions among these non-shuffle children as the + // expected number of shuffle partitions. However, if it's smaller than + // `conf.numShufflePartitions`, we pick `conf.numShufflePartitions` as the + // expected number of shuffle partitions. + math.max(nonShuffleChildrenNumPartitions.max, conf.defaultNumShufflePartitions) + } + } else { + childrenNumPartitions.max + } + + val targetNumPartitions = requiredNumPartitions.getOrElse(expectedChildrenNumPartitions) + + children = children.zip(requiredChildDistributions).zipWithIndex.map { + case ((child, distribution), index) if childrenIndexes.contains(index) => + if (child.outputPartitioning.numPartitions == targetNumPartitions) { + child + } else { + val defaultPartitioning = distribution.createPartitioning(targetNumPartitions) + child match { + // If child is an exchange, we replace it with a new one having defaultPartitioning. + case ShuffleExchangeExec(_, c, _) => ShuffleExchangeExec(defaultPartitioning, c) + case _ => ShuffleExchangeExec(defaultPartitioning, child) + } + } + + case ((child, _), _) => child + } + } + + // Now that we've performed any necessary shuffles, add sorts to guarantee output orderings: + children = children.zip(requiredChildOrderings).map { case (child, requiredOrdering) => + // If child.outputOrdering already satisfies the requiredOrdering, we do not need to sort. + if (SortOrder.orderingSatisfies(child.outputOrdering, requiredOrdering)) { + child + } else { + SortExec(requiredOrdering, global = false, child = child) + } + } + + operator.withNewChildren(children) + } + + def apply(plan: SparkPlan): SparkPlan = plan.transformUp { + case operator: SparkPlan => + ensureDistributionAndOrdering(operator) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala new file mode 100644 index 00000000000..a7fcbecd422 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.adaptive.QueryStageExec +import org.apache.spark.sql.execution.command.{ResetCommand, SetCommand} +import org.apache.spark.sql.execution.exchange.{BroadcastExchangeLike, ReusedExchangeExec, ShuffleExchangeLike} +import org.apache.spark.sql.internal.SQLConf + +import org.apache.kyuubi.sql.KyuubiSQLConf._ + +/** + * This rule split stage into two parts: + * 1. previous stage + * 2. final stage + * For final stage, we can inject extra config. It's useful if we use repartition to optimize + * small files that needs bigger shuffle partition size than previous. + * + * Let's say we have a query with 3 stages, then the logical machine like: + * + * Set/Reset Command -> cleanup previousStage config if user set the spark config. + * Query -> AQE -> stage1 -> preparation (use previousStage to overwrite spark config) + * -> AQE -> stage2 -> preparation (use spark config) + * -> AQE -> stage3 -> preparation (use finalStage config to overwrite spark config, + * store spark config to previousStage.) + * + * An example of the new finalStage config: + * `spark.sql.adaptive.advisoryPartitionSizeInBytes` -> + * `spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes` + */ +case class FinalStageConfigIsolation(session: SparkSession) extends Rule[SparkPlan] { + import FinalStageConfigIsolation._ + + override def apply(plan: SparkPlan): SparkPlan = { + // this rule has no meaning without AQE + if (!conf.getConf(FINAL_STAGE_CONFIG_ISOLATION) || + !conf.getConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED)) { + return plan + } + + if (isFinalStage(plan)) { + // We can not get the whole plan at query preparation phase to detect if current plan is + // for writing, so we depend on a tag which is been injected at post resolution phase. + // Note: we should still do clean up previous config for non-final stage to avoid such case: + // the first statement is write, but the second statement is query. + if (conf.getConf(FINAL_STAGE_CONFIG_ISOLATION_WRITE_ONLY) && + !WriteUtils.isWrite(session, plan)) { + return plan + } + + // set config for final stage + session.conf.getAll.filter(_._1.startsWith(FINAL_STAGE_CONFIG_PREFIX)).foreach { + case (k, v) => + val sparkConfigKey = s"spark.sql.${k.substring(FINAL_STAGE_CONFIG_PREFIX.length)}" + val previousStageConfigKey = + s"$PREVIOUS_STAGE_CONFIG_PREFIX${k.substring(FINAL_STAGE_CONFIG_PREFIX.length)}" + // store the previous config only if we have not stored, to avoid some query only + // have one stage that will overwrite real config. 
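A sketch of the naming convention this rule implements: the advisory-partition-size keys are the ones named in the scaladoc above, the enable flag is declared in KyuubiSQLConf, the sizes are illustrative, and the statement is a write so the isolation also applies when the write-only mode is on:

spark.conf.set("spark.sql.optimizer.finalStageConfigIsolation.enabled", "true")
spark.conf.set("spark.sql.adaptive.advisoryPartitionSizeInBytes", "64m")             // previous stages
spark.conf.set("spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes", "256m") // final stage only
spark.sql("INSERT INTO TABLE demo_target SELECT /*+ REBALANCE */ * FROM demo_source")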
+ if (!session.sessionState.conf.contains(previousStageConfigKey)) { + val originalValue = + if (session.conf.getOption(sparkConfigKey).isDefined) { + session.sessionState.conf.getConfString(sparkConfigKey) + } else { + // the default value of config is None, so we need to use a internal tag + INTERNAL_UNSET_CONFIG_TAG + } + logInfo(s"Store config: $sparkConfigKey to previousStage, " + + s"original value: $originalValue ") + session.sessionState.conf.setConfString(previousStageConfigKey, originalValue) + } + logInfo(s"For final stage: set $sparkConfigKey = $v.") + session.conf.set(sparkConfigKey, v) + } + } else { + // reset config for previous stage + session.conf.getAll.filter(_._1.startsWith(PREVIOUS_STAGE_CONFIG_PREFIX)).foreach { + case (k, v) => + val sparkConfigKey = s"spark.sql.${k.substring(PREVIOUS_STAGE_CONFIG_PREFIX.length)}" + logInfo(s"For previous stage: set $sparkConfigKey = $v.") + if (v == INTERNAL_UNSET_CONFIG_TAG) { + session.conf.unset(sparkConfigKey) + } else { + session.conf.set(sparkConfigKey, v) + } + // unset config so that we do not need to reset configs for every previous stage + session.conf.unset(k) + } + } + + plan + } + + /** + * Currently formula depend on AQE in Spark 3.1.1, not sure it can work in future. + */ + private def isFinalStage(plan: SparkPlan): Boolean = { + var shuffleNum = 0 + var broadcastNum = 0 + var reusedNum = 0 + var queryStageNum = 0 + + def collectNumber(p: SparkPlan): SparkPlan = { + p transform { + case shuffle: ShuffleExchangeLike => + shuffleNum += 1 + shuffle + + case broadcast: BroadcastExchangeLike => + broadcastNum += 1 + broadcast + + case reusedExchangeExec: ReusedExchangeExec => + reusedNum += 1 + reusedExchangeExec + + // query stage is leaf node so we need to transform it manually + // compatible with Spark 3.5: + // SPARK-42101: table cache is a independent query stage, so do not need include it. + case queryStage: QueryStageExec if queryStage.nodeName != "TableCacheQueryStage" => + queryStageNum += 1 + collectNumber(queryStage.plan) + queryStage + } + } + collectNumber(plan) + + if (shuffleNum == 0) { + // we don not care about broadcast stage here since it won't change partition number. + true + } else if (shuffleNum + broadcastNum + reusedNum == queryStageNum) { + true + } else { + false + } + } +} +object FinalStageConfigIsolation { + final val SQL_PREFIX = "spark.sql." + final val FINAL_STAGE_CONFIG_PREFIX = "spark.sql.finalStage." + final val PREVIOUS_STAGE_CONFIG_PREFIX = "spark.sql.previousStage." 
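The prefixes above map a single Spark SQL key into its final-stage override and its stashed previous value; the helper defined just below performs the last of those mappings, for example:

// user-facing override   : spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes
// effective Spark key    : spark.sql.adaptive.advisoryPartitionSizeInBytes
// stashed previous value : spark.sql.previousStage.adaptive.advisoryPartitionSizeInBytes
assert(FinalStageConfigIsolation.getPreviousStageConfigKey(
  "spark.sql.adaptive.advisoryPartitionSizeInBytes")
  .contains("spark.sql.previousStage.adaptive.advisoryPartitionSizeInBytes"))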
+ final val INTERNAL_UNSET_CONFIG_TAG = "__INTERNAL_UNSET_CONFIG_TAG__" + + def getPreviousStageConfigKey(configKey: String): Option[String] = { + if (configKey.startsWith(SQL_PREFIX)) { + Some(s"$PREVIOUS_STAGE_CONFIG_PREFIX${configKey.substring(SQL_PREFIX.length)}") + } else { + None + } + } +} + +case class FinalStageConfigIsolationCleanRule(session: SparkSession) extends Rule[LogicalPlan] { + import FinalStageConfigIsolation._ + + override def apply(plan: LogicalPlan): LogicalPlan = plan match { + case set @ SetCommand(Some((k, Some(_)))) if k.startsWith(SQL_PREFIX) => + checkAndUnsetPreviousStageConfig(k) + set + + case reset @ ResetCommand(Some(k)) if k.startsWith(SQL_PREFIX) => + checkAndUnsetPreviousStageConfig(k) + reset + + case other => other + } + + private def checkAndUnsetPreviousStageConfig(configKey: String): Unit = { + getPreviousStageConfigKey(configKey).foreach { previousStageConfigKey => + if (session.sessionState.conf.contains(previousStageConfigKey)) { + logInfo(s"For previous stage: unset $previousStageConfigKey") + session.conf.unset(previousStageConfigKey) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala new file mode 100644 index 00000000000..6f45dae126e --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.network.util.ByteUnit +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.internal.SQLConf._ + +object KyuubiSQLConf { + + val INSERT_REPARTITION_BEFORE_WRITE = + buildConf("spark.sql.optimizer.insertRepartitionBeforeWrite.enabled") + .doc("Add repartition node at the top of query plan. An approach of merging small files.") + .version("1.2.0") + .booleanConf + .createWithDefault(true) + + val INSERT_REPARTITION_NUM = + buildConf("spark.sql.optimizer.insertRepartitionNum") + .doc(s"The partition number if ${INSERT_REPARTITION_BEFORE_WRITE.key} is enabled. " + + s"If AQE is disabled, the default value is ${SQLConf.SHUFFLE_PARTITIONS.key}. " + + "If AQE is enabled, the default value is none that means depend on AQE. " + + "This config is used for Spark 3.1 only.") + .version("1.2.0") + .intConf + .createOptional + + val DYNAMIC_PARTITION_INSERTION_REPARTITION_NUM = + buildConf("spark.sql.optimizer.dynamicPartitionInsertionRepartitionNum") + .doc(s"The partition number of each dynamic partition if " + + s"${INSERT_REPARTITION_BEFORE_WRITE.key} is enabled. 
" + + "We will repartition by dynamic partition columns to reduce the small file but that " + + "can cause data skew. This config is to extend the partition of dynamic " + + "partition column to avoid skew but may generate some small files.") + .version("1.2.0") + .intConf + .createWithDefault(100) + + val FORCE_SHUFFLE_BEFORE_JOIN = + buildConf("spark.sql.optimizer.forceShuffleBeforeJoin.enabled") + .doc("Ensure shuffle node exists before shuffled join (shj and smj) to make AQE " + + "`OptimizeSkewedJoin` works (complex scenario join, multi table join).") + .version("1.2.0") + .booleanConf + .createWithDefault(false) + + val FINAL_STAGE_CONFIG_ISOLATION = + buildConf("spark.sql.optimizer.finalStageConfigIsolation.enabled") + .doc("If true, the final stage support use different config with previous stage. " + + "The prefix of final stage config key should be `spark.sql.finalStage.`." + + "For example, the raw spark config: `spark.sql.adaptive.advisoryPartitionSizeInBytes`, " + + "then the final stage config should be: " + + "`spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes`.") + .version("1.2.0") + .booleanConf + .createWithDefault(false) + + val SQL_CLASSIFICATION = "spark.sql.analyzer.classification" + val SQL_CLASSIFICATION_ENABLED = + buildConf("spark.sql.analyzer.classification.enabled") + .doc("When true, allows Kyuubi engine to judge this SQL's classification " + + s"and set `$SQL_CLASSIFICATION` back into sessionConf. " + + "Through this configuration item, Spark can optimizing configuration dynamic") + .version("1.4.0") + .booleanConf + .createWithDefault(false) + + val INSERT_ZORDER_BEFORE_WRITING = + buildConf("spark.sql.optimizer.insertZorderBeforeWriting.enabled") + .doc("When true, we will follow target table properties to insert zorder or not. " + + "The key properties are: 1) kyuubi.zorder.enabled; if this property is true, we will " + + "insert zorder before writing data. 2) kyuubi.zorder.cols; string split by comma, we " + + "will zorder by these cols.") + .version("1.4.0") + .booleanConf + .createWithDefault(true) + + val ZORDER_GLOBAL_SORT_ENABLED = + buildConf("spark.sql.optimizer.zorderGlobalSort.enabled") + .doc("When true, we do a global sort using zorder. Note that, it can cause data skew " + + "issue if the zorder columns have less cardinality. When false, we only do local sort " + + "using zorder.") + .version("1.4.0") + .booleanConf + .createWithDefault(true) + + val REBALANCE_BEFORE_ZORDER = + buildConf("spark.sql.optimizer.rebalanceBeforeZorder.enabled") + .doc("When true, we do a rebalance before zorder in case data skew. " + + "Note that, if the insertion is dynamic partition we will use the partition " + + "columns to rebalance. Note that, this config only affects with Spark 3.3.x") + .version("1.6.0") + .booleanConf + .createWithDefault(false) + + val REBALANCE_ZORDER_COLUMNS_ENABLED = + buildConf("spark.sql.optimizer.rebalanceZorderColumns.enabled") + .doc(s"When true and ${REBALANCE_BEFORE_ZORDER.key} is true, we do rebalance before " + + s"Z-Order. If it's dynamic partition insert, the rebalance expression will include " + + s"both partition columns and Z-Order columns. 
Note that, this config only " + + s"affects with Spark 3.3.x") + .version("1.6.0") + .booleanConf + .createWithDefault(false) + + val TWO_PHASE_REBALANCE_BEFORE_ZORDER = + buildConf("spark.sql.optimizer.twoPhaseRebalanceBeforeZorder.enabled") + .doc(s"When true and ${REBALANCE_BEFORE_ZORDER.key} is true, we do two phase rebalance " + + s"before Z-Order for the dynamic partition write. The first phase rebalance using " + + s"dynamic partition column; The second phase rebalance using dynamic partition column + " + + s"Z-Order columns. Note that, this config only affects with Spark 3.3.x") + .version("1.6.0") + .booleanConf + .createWithDefault(false) + + val ZORDER_USING_ORIGINAL_ORDERING_ENABLED = + buildConf("spark.sql.optimizer.zorderUsingOriginalOrdering.enabled") + .doc(s"When true and ${REBALANCE_BEFORE_ZORDER.key} is true, we do sort by " + + s"the original ordering i.e. lexicographical order. Note that, this config only " + + s"affects with Spark 3.3.x") + .version("1.6.0") + .booleanConf + .createWithDefault(false) + + val WATCHDOG_MAX_PARTITIONS = + buildConf("spark.sql.watchdog.maxPartitions") + .doc("Set the max partition number when spark scans a data source. " + + "Enable maxPartitions Strategy by specifying this configuration. " + + "Add maxPartitions Strategy to avoid scan excessive partitions " + + "on partitioned table, it's optional that works with defined") + .version("1.4.0") + .intConf + .createOptional + + val WATCHDOG_MAX_FILE_SIZE = + buildConf("spark.sql.watchdog.maxFileSize") + .doc("Set the maximum size in bytes of files when spark scans a data source. " + + "Enable maxFileSize Strategy by specifying this configuration. " + + "Add maxFileSize Strategy to avoid scan excessive size of files," + + " it's optional that works with defined") + .version("1.8.0") + .bytesConf(ByteUnit.BYTE) + .createOptional + + val WATCHDOG_FORCED_MAXOUTPUTROWS = + buildConf("spark.sql.watchdog.forcedMaxOutputRows") + .doc("Add ForcedMaxOutputRows rule to avoid huge output rows of non-limit query " + + "unexpectedly, it's optional that works with defined") + .version("1.4.0") + .intConf + .createOptional + + val DROP_IGNORE_NONEXISTENT = + buildConf("spark.sql.optimizer.dropIgnoreNonExistent") + .doc("Do not report an error if DROP DATABASE/TABLE/VIEW/FUNCTION/PARTITION specifies " + + "a non-existent database/table/view/function/partition") + .version("1.5.0") + .booleanConf + .createWithDefault(false) + + val INFER_REBALANCE_AND_SORT_ORDERS = + buildConf("spark.sql.optimizer.inferRebalanceAndSortOrders.enabled") + .doc("When ture, infer columns for rebalance and sort orders from original query, " + + "e.g. the join keys from join. 
It can avoid compression ratio regression.") + .version("1.7.0") + .booleanConf + .createWithDefault(false) + + val INFER_REBALANCE_AND_SORT_ORDERS_MAX_COLUMNS = + buildConf("spark.sql.optimizer.inferRebalanceAndSortOrdersMaxColumns") + .doc("The max columns of inferred columns.") + .version("1.7.0") + .intConf + .checkValue(_ > 0, "must be positive number") + .createWithDefault(3) + + val INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE = + buildConf("spark.sql.optimizer.insertRepartitionBeforeWriteIfNoShuffle.enabled") + .doc("When true, add repartition even if the original plan does not have shuffle.") + .version("1.7.0") + .booleanConf + .createWithDefault(false) + + val FINAL_STAGE_CONFIG_ISOLATION_WRITE_ONLY = + buildConf("spark.sql.optimizer.finalStageConfigIsolationWriteOnly.enabled") + .doc("When true, only enable final stage isolation for writing.") + .version("1.7.0") + .booleanConf + .createWithDefault(true) + + val FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED = + buildConf("spark.sql.finalWriteStage.eagerlyKillExecutors.enabled") + .doc("When true, eagerly kill redundant executors before running final write stage.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL = + buildConf("spark.sql.finalWriteStage.eagerlyKillExecutors.killAll") + .doc("When true, eagerly kill all executors before running final write stage. " + + "Mainly for test.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE = + buildConf("spark.sql.finalWriteStage.skipKillingExecutorsForTableCache") + .doc("When true, skip killing executors if the plan has table caches.") + .version("1.8.0") + .booleanConf + .createWithDefault(true) + + val FINAL_WRITE_STAGE_PARTITION_FACTOR = + buildConf("spark.sql.finalWriteStage.retainExecutorsFactor") + .doc("If the target executors * factor < active executors, and " + + "target executors * factor > min executors, then kill redundant executors.") + .version("1.8.0") + .doubleConf + .checkValue(_ >= 1, "must be bigger than or equal to 1") + .createWithDefault(1.2) + + val FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED = + buildConf("spark.sql.finalWriteStage.resourceIsolation.enabled") + .doc( + "When true, make final write stage resource isolation using custom RDD resource profile.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_EXECUTOR_CORES = + buildConf("spark.sql.finalWriteStage.executorCores") + .doc("Specify the executor core request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .intConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_MEMORY = + buildConf("spark.sql.finalWriteStage.executorMemory") + .doc("Specify the executor on heap memory request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_MEMORY_OVERHEAD = + buildConf("spark.sql.finalWriteStage.executorMemoryOverhead") + .doc("Specify the executor memory overhead request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_OFF_HEAP_MEMORY = + buildConf("spark.sql.finalWriteStage.executorOffHeapMemory") + .doc("Specify the executor off heap memory request for final write stage. 
" + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLExtensionException.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLExtensionException.scala new file mode 100644 index 00000000000..88c5a988fd9 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLExtensionException.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import java.sql.SQLException + +class KyuubiSQLExtensionException(reason: String, cause: Throwable) + extends SQLException(reason, cause) { + + def this(reason: String) = { + this(reason, null) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala new file mode 100644 index 00000000000..cc00bf88e94 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql + +import scala.collection.JavaConverters.asScalaBufferConverter +import scala.collection.mutable.ListBuffer + +import org.antlr.v4.runtime.ParserRuleContext +import org.antlr.v4.runtime.misc.Interval +import org.antlr.v4.runtime.tree.ParseTree +import org.apache.spark.sql.catalyst.SQLConfHelper +import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedRelation, UnresolvedStar} +import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.parser.ParserUtils.withOrigin +import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project, Sort} + +import org.apache.kyuubi.sql.KyuubiSparkSQLParser._ +import org.apache.kyuubi.sql.zorder.{OptimizeZorderStatement, Zorder} + +class KyuubiSparkSQLAstBuilder extends KyuubiSparkSQLBaseVisitor[AnyRef] with SQLConfHelper { + + def buildOptimizeStatement( + unparsedPredicateOptimize: UnparsedPredicateOptimize, + parseExpression: String => Expression): LogicalPlan = { + + val UnparsedPredicateOptimize(tableIdent, tablePredicate, orderExpr) = + unparsedPredicateOptimize + + val predicate = tablePredicate.map(parseExpression) + verifyPartitionPredicates(predicate) + val table = UnresolvedRelation(tableIdent) + val tableWithFilter = predicate match { + case Some(expr) => Filter(expr, table) + case None => table + } + val query = + Sort( + SortOrder(orderExpr, Ascending, NullsLast, Seq.empty) :: Nil, + conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED), + Project(Seq(UnresolvedStar(None)), tableWithFilter)) + OptimizeZorderStatement(tableIdent, query) + } + + private def verifyPartitionPredicates(predicates: Option[Expression]): Unit = { + predicates.foreach { + case p if !isLikelySelective(p) => + throw new KyuubiSQLExtensionException(s"unsupported partition predicates: ${p.sql}") + case _ => + } + } + + /** + * Forked from Apache Spark's org.apache.spark.sql.catalyst.expressions.PredicateHelper + * The `PredicateHelper.isLikelySelective()` is available since Spark-3.3, forked for Spark + * that is lower than 3.3. + * + * Returns whether an expression is likely to be selective + */ + private def isLikelySelective(e: Expression): Boolean = e match { + case Not(expr) => isLikelySelective(expr) + case And(l, r) => isLikelySelective(l) || isLikelySelective(r) + case Or(l, r) => isLikelySelective(l) && isLikelySelective(r) + case _: StringRegexExpression => true + case _: BinaryComparison => true + case _: In | _: InSet => true + case _: StringPredicate => true + case BinaryPredicate(_) => true + case _: MultiLikeBase => true + case _ => false + } + + private object BinaryPredicate { + def unapply(expr: Expression): Option[Expression] = expr match { + case _: Contains => Option(expr) + case _: StartsWith => Option(expr) + case _: EndsWith => Option(expr) + case _ => None + } + } + + /** + * Create an expression from the given context. This method just passes the context on to the + * visitor and only takes care of typing (We assume that the visitor returns an Expression here). 
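+   * The cast itself is performed by `typedVisit`, so callers are responsible for passing a
+   * context whose visit result is indeed an `Expression`.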
+ */ + protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx) + + protected def multiPart(ctx: ParserRuleContext): Seq[String] = typedVisit(ctx) + + override def visitSingleStatement(ctx: SingleStatementContext): LogicalPlan = { + visit(ctx.statement()).asInstanceOf[LogicalPlan] + } + + override def visitOptimizeZorder( + ctx: OptimizeZorderContext): UnparsedPredicateOptimize = withOrigin(ctx) { + val tableIdent = multiPart(ctx.multipartIdentifier()) + + val predicate = Option(ctx.whereClause()) + .map(_.partitionPredicate) + .map(extractRawText(_)) + + val zorderCols = ctx.zorderClause().order.asScala + .map(visitMultipartIdentifier) + .map(UnresolvedAttribute(_)) + .toSeq + + val orderExpr = + if (zorderCols.length == 1) { + zorderCols.head + } else { + Zorder(zorderCols) + } + UnparsedPredicateOptimize(tableIdent, predicate, orderExpr) + } + + override def visitPassThrough(ctx: PassThroughContext): LogicalPlan = null + + override def visitMultipartIdentifier(ctx: MultipartIdentifierContext): Seq[String] = + withOrigin(ctx) { + ctx.parts.asScala.map(_.getText).toSeq + } + + override def visitZorderClause(ctx: ZorderClauseContext): Seq[UnresolvedAttribute] = + withOrigin(ctx) { + val res = ListBuffer[UnresolvedAttribute]() + ctx.multipartIdentifier().forEach { identifier => + res += UnresolvedAttribute(identifier.parts.asScala.map(_.getText).toSeq) + } + res.toSeq + } + + private def typedVisit[T](ctx: ParseTree): T = { + ctx.accept(this).asInstanceOf[T] + } + + private def extractRawText(exprContext: ParserRuleContext): String = { + // Extract the raw expression which will be parsed later + exprContext.getStart.getInputStream.getText(new Interval( + exprContext.getStart.getStartIndex, + exprContext.getStop.getStopIndex)) + } +} + +/** + * a logical plan contains an unparsed expression that will be parsed by spark. + */ +trait UnparsedExpressionLogicalPlan extends LogicalPlan { + override def output: Seq[Attribute] = throw new UnsupportedOperationException() + + override def children: Seq[LogicalPlan] = throw new UnsupportedOperationException() + + protected def withNewChildrenInternal( + newChildren: IndexedSeq[LogicalPlan]): LogicalPlan = + throw new UnsupportedOperationException() +} + +case class UnparsedPredicateOptimize( + tableIdent: Seq[String], + tablePredicate: Option[String], + orderExpr: Expression) extends UnparsedExpressionLogicalPlan {} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLCommonExtension.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLCommonExtension.scala new file mode 100644 index 00000000000..f39ad3cc390 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLCommonExtension.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.SparkSessionExtensions + +import org.apache.kyuubi.sql.zorder.{InsertZorderBeforeWritingDatasource33, InsertZorderBeforeWritingHive33, ResolveZorder} + +class KyuubiSparkSQLCommonExtension extends (SparkSessionExtensions => Unit) { + override def apply(extensions: SparkSessionExtensions): Unit = { + KyuubiSparkSQLCommonExtension.injectCommonExtensions(extensions) + } +} + +object KyuubiSparkSQLCommonExtension { + def injectCommonExtensions(extensions: SparkSessionExtensions): Unit = { + // inject zorder parser and related rules + extensions.injectParser { case (_, parser) => new SparkKyuubiSparkSQLParser(parser) } + extensions.injectResolutionRule(ResolveZorder) + + // Note that: + // InsertZorderBeforeWritingDatasource and InsertZorderBeforeWritingHive + // should be applied before + // RepartitionBeforeWriting and RebalanceBeforeWriting + // because we can only apply one of them (i.e. Global Sort or Repartition/Rebalance) + extensions.injectPostHocResolutionRule(InsertZorderBeforeWritingDatasource33) + extensions.injectPostHocResolutionRule(InsertZorderBeforeWritingHive33) + extensions.injectPostHocResolutionRule(FinalStageConfigIsolationCleanRule) + + extensions.injectQueryStagePrepRule(_ => InsertShuffleNodeBeforeJoin) + + extensions.injectQueryStagePrepRule(FinalStageConfigIsolation(_)) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala new file mode 100644 index 00000000000..792315d897a --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLExtension.scala @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.{FinalStageResourceManager, InjectCustomResourceProfile, SparkSessionExtensions} + +import org.apache.kyuubi.sql.watchdog.{ForcedMaxOutputRowsRule, MaxScanStrategy} + +// scalastyle:off line.size.limit +/** + * Depend on Spark SQL Extension framework, we can use this extension follow steps + * 1. move this jar into $SPARK_HOME/jars + * 2. 
add config into `spark-defaults.conf`: `spark.sql.extensions=org.apache.kyuubi.sql.KyuubiSparkSQLExtension` + */ +// scalastyle:on line.size.limit +class KyuubiSparkSQLExtension extends (SparkSessionExtensions => Unit) { + override def apply(extensions: SparkSessionExtensions): Unit = { + KyuubiSparkSQLCommonExtension.injectCommonExtensions(extensions) + + extensions.injectPostHocResolutionRule(RebalanceBeforeWritingDatasource) + extensions.injectPostHocResolutionRule(RebalanceBeforeWritingHive) + extensions.injectPostHocResolutionRule(DropIgnoreNonexistent) + + // watchdog extension + extensions.injectOptimizerRule(ForcedMaxOutputRowsRule) + extensions.injectPlannerStrategy(MaxScanStrategy) + + extensions.injectQueryStagePrepRule(FinalStageResourceManager(_)) + extensions.injectQueryStagePrepRule(InjectCustomResourceProfile) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala new file mode 100644 index 00000000000..c4418c33c44 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLParser.scala @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql + +import org.antlr.v4.runtime._ +import org.antlr.v4.runtime.atn.PredictionMode +import org.antlr.v4.runtime.misc.{Interval, ParseCancellationException} +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier} +import org.apache.spark.sql.catalyst.expressions.Expression +import org.apache.spark.sql.catalyst.parser.{ParseErrorListener, ParseException, ParserInterface, PostProcessor} +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.catalyst.trees.Origin +import org.apache.spark.sql.types.{DataType, StructType} + +abstract class KyuubiSparkSQLParserBase extends ParserInterface with SQLConfHelper { + def delegate: ParserInterface + def astBuilder: KyuubiSparkSQLAstBuilder + + override def parsePlan(sqlText: String): LogicalPlan = parse(sqlText) { parser => + astBuilder.visit(parser.singleStatement()) match { + case optimize: UnparsedPredicateOptimize => + astBuilder.buildOptimizeStatement(optimize, delegate.parseExpression) + case plan: LogicalPlan => plan + case _ => delegate.parsePlan(sqlText) + } + } + + protected def parse[T](command: String)(toResult: KyuubiSparkSQLParser => T): T = { + val lexer = new KyuubiSparkSQLLexer( + new UpperCaseCharStream(CharStreams.fromString(command))) + lexer.removeErrorListeners() + lexer.addErrorListener(ParseErrorListener) + + val tokenStream = new CommonTokenStream(lexer) + val parser = new KyuubiSparkSQLParser(tokenStream) + parser.addParseListener(PostProcessor) + parser.removeErrorListeners() + parser.addErrorListener(ParseErrorListener) + + try { + try { + // first, try parsing with potentially faster SLL mode + parser.getInterpreter.setPredictionMode(PredictionMode.SLL) + toResult(parser) + } catch { + case _: ParseCancellationException => + // if we fail, parse with LL mode + tokenStream.seek(0) // rewind input stream + parser.reset() + + // Try Again. 
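+          // Note: this mirrors the standard ANTLR two-stage parsing strategy also used by
+          // Spark's SQL parser: SLL prediction is faster but not complete for every input;
+          // when it gives up with a ParseCancellationException, the token stream is rewound
+          // and the statement is re-parsed with the slower but fully general LL mode.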
+ parser.getInterpreter.setPredictionMode(PredictionMode.LL) + toResult(parser) + } + } catch { + case e: ParseException if e.command.isDefined => + throw e + case e: ParseException => + throw e.withCommand(command) + case e: AnalysisException => + val position = Origin(e.line, e.startPosition) + throw new ParseException(Option(command), e.message, position, position) + } + } + + override def parseExpression(sqlText: String): Expression = { + delegate.parseExpression(sqlText) + } + + override def parseTableIdentifier(sqlText: String): TableIdentifier = { + delegate.parseTableIdentifier(sqlText) + } + + override def parseFunctionIdentifier(sqlText: String): FunctionIdentifier = { + delegate.parseFunctionIdentifier(sqlText) + } + + override def parseMultipartIdentifier(sqlText: String): Seq[String] = { + delegate.parseMultipartIdentifier(sqlText) + } + + override def parseTableSchema(sqlText: String): StructType = { + delegate.parseTableSchema(sqlText) + } + + override def parseDataType(sqlText: String): DataType = { + delegate.parseDataType(sqlText) + } + + /** + * This functions was introduced since spark-3.3, for more details, please see + * https://github.com/apache/spark/pull/34543 + */ + override def parseQuery(sqlText: String): LogicalPlan = { + delegate.parseQuery(sqlText) + } +} + +class SparkKyuubiSparkSQLParser( + override val delegate: ParserInterface) + extends KyuubiSparkSQLParserBase { + def astBuilder: KyuubiSparkSQLAstBuilder = new KyuubiSparkSQLAstBuilder +} + +/* Copied from Apache Spark's to avoid dependency on Spark Internals */ +class UpperCaseCharStream(wrapped: CodePointCharStream) extends CharStream { + override def consume(): Unit = wrapped.consume() + override def getSourceName(): String = wrapped.getSourceName + override def index(): Int = wrapped.index + override def mark(): Int = wrapped.mark + override def release(marker: Int): Unit = wrapped.release(marker) + override def seek(where: Int): Unit = wrapped.seek(where) + override def size(): Int = wrapped.size + + override def getText(interval: Interval): String = wrapped.getText(interval) + + // scalastyle:off + override def LA(i: Int): Int = { + val la = wrapped.LA(i) + if (la == 0 || la == IntStream.EOF) la + else Character.toUpperCase(la) + } + // scalastyle:on +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RebalanceBeforeWriting.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RebalanceBeforeWriting.scala new file mode 100644 index 00000000000..3cbacdd2f03 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RebalanceBeforeWriting.scala @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, SortOrder} +import org.apache.spark.sql.catalyst.plans.logical._ + +trait RepartitionBuilderWithRebalance extends RepartitionBuilder { + override def buildRepartition( + dynamicPartitionColumns: Seq[Attribute], + query: LogicalPlan): LogicalPlan = { + if (!conf.getConf(KyuubiSQLConf.INFER_REBALANCE_AND_SORT_ORDERS) || + dynamicPartitionColumns.nonEmpty) { + RebalancePartitions(dynamicPartitionColumns, query) + } else { + val maxColumns = conf.getConf(KyuubiSQLConf.INFER_REBALANCE_AND_SORT_ORDERS_MAX_COLUMNS) + val inferred = InferRebalanceAndSortOrders.infer(query) + if (inferred.isDefined) { + val (partitioning, ordering) = inferred.get + val rebalance = RebalancePartitions(partitioning.take(maxColumns), query) + if (ordering.nonEmpty) { + val sortOrders = ordering.take(maxColumns).map(o => SortOrder(o, Ascending)) + Sort(sortOrders, false, rebalance) + } else { + rebalance + } + } else { + RebalancePartitions(dynamicPartitionColumns, query) + } + } + } + + override def canInsertRepartitionByExpression(plan: LogicalPlan): Boolean = { + super.canInsertRepartitionByExpression(plan) && { + plan match { + case _: RebalancePartitions => false + case _ => true + } + } + } +} + +/** + * For datasource table, there two commands can write data to table + * 1. InsertIntoHadoopFsRelationCommand + * 2. CreateDataSourceTableAsSelectCommand + * This rule add a RebalancePartitions node between write and query + */ +case class RebalanceBeforeWritingDatasource(session: SparkSession) + extends RepartitionBeforeWritingDatasourceBase + with RepartitionBuilderWithRebalance {} + +/** + * For Hive table, there two commands can write data to table + * 1. InsertIntoHiveTable + * 2. CreateHiveTableAsSelectCommand + * This rule add a RebalancePartitions node between write and query + */ +case class RebalanceBeforeWritingHive(session: SparkSession) + extends RepartitionBeforeWritingHiveBase + with RepartitionBuilderWithRebalance {} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RepartitionBeforeWritingBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RepartitionBeforeWritingBase.scala new file mode 100644 index 00000000000..3ebb9740f5f --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/RepartitionBeforeWritingBase.scala @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.catalyst.expressions.Attribute +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable +import org.apache.spark.sql.internal.StaticSQLConf + +trait RepartitionBuilder extends Rule[LogicalPlan] with RepartitionBeforeWriteHelper { + def buildRepartition( + dynamicPartitionColumns: Seq[Attribute], + query: LogicalPlan): LogicalPlan +} + +/** + * For datasource table, there two commands can write data to table + * 1. InsertIntoHadoopFsRelationCommand + * 2. CreateDataSourceTableAsSelectCommand + * This rule add a repartition node between write and query + */ +abstract class RepartitionBeforeWritingDatasourceBase extends RepartitionBuilder { + + override def apply(plan: LogicalPlan): LogicalPlan = { + if (conf.getConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE)) { + addRepartition(plan) + } else { + plan + } + } + + private def addRepartition(plan: LogicalPlan): LogicalPlan = plan match { + case i @ InsertIntoHadoopFsRelationCommand(_, sp, _, pc, bucket, _, _, query, _, _, _, _) + if query.resolved && bucket.isEmpty && canInsertRepartitionByExpression(query) => + val dynamicPartitionColumns = pc.filterNot(attr => sp.contains(attr.name)) + i.copy(query = buildRepartition(dynamicPartitionColumns, query)) + + case u @ Union(children, _, _) => + u.copy(children = children.map(addRepartition)) + + case _ => plan + } +} + +/** + * For Hive table, there two commands can write data to table + * 1. InsertIntoHiveTable + * 2. CreateHiveTableAsSelectCommand + * This rule add a repartition node between write and query + */ +abstract class RepartitionBeforeWritingHiveBase extends RepartitionBuilder { + override def apply(plan: LogicalPlan): LogicalPlan = { + if (conf.getConf(StaticSQLConf.CATALOG_IMPLEMENTATION) == "hive" && + conf.getConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE)) { + addRepartition(plan) + } else { + plan + } + } + + def addRepartition(plan: LogicalPlan): LogicalPlan = plan match { + case i @ InsertIntoHiveTable(table, partition, query, _, _, _, _, _, _, _, _) + if query.resolved && table.bucketSpec.isEmpty && canInsertRepartitionByExpression(query) => + val dynamicPartitionColumns = partition.filter(_._2.isEmpty).keys + .flatMap(name => query.output.find(_.name == name)).toSeq + i.copy(query = buildRepartition(dynamicPartitionColumns, query)) + + case u @ Union(children, _, _) => + u.copy(children = children.map(addRepartition)) + + case _ => plan + } +} + +trait RepartitionBeforeWriteHelper extends Rule[LogicalPlan] { + private def hasBenefit(plan: LogicalPlan): Boolean = { + def probablyHasShuffle: Boolean = plan.find { + case _: Join => true + case _: Aggregate => true + case _: Distinct => true + case _: Deduplicate => true + case _: Window => true + case s: Sort if s.global => true + case _: RepartitionOperation => true + case _: GlobalLimit => true + case _ => false + }.isDefined + + conf.getConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE) || probablyHasShuffle + } + + def canInsertRepartitionByExpression(plan: LogicalPlan): Boolean = { + def canInsert(p: LogicalPlan): Boolean = p match { + case Project(_, child) => canInsert(child) + case SubqueryAlias(_, child) => canInsert(child) + case Limit(_, _) => false + case _: Sort => false + case _: RepartitionByExpression => false + case _: Repartition 
=> false + case _ => true + } + + // 1. make sure AQE is enabled, otherwise it is no meaning to add a shuffle + // 2. make sure it does not break the semantics of original plan + // 3. try to avoid adding a shuffle if it has potential performance regression + conf.adaptiveExecutionEnabled && canInsert(plan) && hasBenefit(plan) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/WriteUtils.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/WriteUtils.scala new file mode 100644 index 00000000000..89dd8319480 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/WriteUtils.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.execution.{SparkPlan, UnionExec} +import org.apache.spark.sql.execution.command.DataWritingCommandExec +import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec + +object WriteUtils { + def isWrite(session: SparkSession, plan: SparkPlan): Boolean = { + plan match { + case _: DataWritingCommandExec => true + case _: V2TableWriteExec => true + case u: UnionExec if u.children.nonEmpty => u.children.forall(isWrite(session, _)) + case _ => false + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsBase.scala new file mode 100644 index 00000000000..4f897d1b600 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsBase.scala @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql.watchdog + +import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation +import org.apache.spark.sql.catalyst.dsl.expressions._ +import org.apache.spark.sql.catalyst.expressions.Alias +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.command.DataWritingCommand + +import org.apache.kyuubi.sql.KyuubiSQLConf + +/* + * Add ForcedMaxOutputRows rule for output rows limitation + * to avoid huge output rows of non_limit query unexpectedly + * mainly applied to cases as below: + * + * case 1: + * {{{ + * SELECT [c1, c2, ...] + * }}} + * + * case 2: + * {{{ + * WITH CTE AS ( + * ...) + * SELECT [c1, c2, ...] FROM CTE ... + * }}} + * + * The Logical Rule add a GlobalLimit node before root project + * */ +trait ForcedMaxOutputRowsBase extends Rule[LogicalPlan] { + + protected def isChildAggregate(a: Aggregate): Boolean + + protected def canInsertLimitInner(p: LogicalPlan): Boolean = p match { + case Aggregate(_, Alias(_, "havingCondition") :: Nil, _) => false + case agg: Aggregate => !isChildAggregate(agg) + case _: RepartitionByExpression => true + case _: Distinct => true + case _: Filter => true + case _: Project => true + case Limit(_, _) => true + case _: Sort => true + case Union(children, _, _) => + if (children.exists(_.isInstanceOf[DataWritingCommand])) { + false + } else { + true + } + case _: MultiInstanceRelation => true + case _: Join => true + case _ => false + } + + protected def canInsertLimit(p: LogicalPlan, maxOutputRowsOpt: Option[Int]): Boolean = { + maxOutputRowsOpt match { + case Some(forcedMaxOutputRows) => canInsertLimitInner(p) && + !p.maxRows.exists(_ <= forcedMaxOutputRows) + case None => false + } + } + + override def apply(plan: LogicalPlan): LogicalPlan = { + val maxOutputRowsOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS) + plan match { + case p if p.resolved && canInsertLimit(p, maxOutputRowsOpt) => + Limit( + maxOutputRowsOpt.get, + plan) + case _ => plan + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsRule.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsRule.scala new file mode 100644 index 00000000000..a3d990b1098 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/ForcedMaxOutputRowsRule.scala @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql.watchdog + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, CommandResult, LogicalPlan, Union, WithCTE} +import org.apache.spark.sql.execution.command.DataWritingCommand + +case class ForcedMaxOutputRowsRule(sparkSession: SparkSession) extends ForcedMaxOutputRowsBase { + + override protected def isChildAggregate(a: Aggregate): Boolean = false + + override protected def canInsertLimitInner(p: LogicalPlan): Boolean = p match { + case WithCTE(plan, _) => this.canInsertLimitInner(plan) + case plan: LogicalPlan => plan match { + case Union(children, _, _) => !children.exists { + case _: DataWritingCommand => true + case p: CommandResult if p.commandLogicalPlan.isInstanceOf[DataWritingCommand] => true + case _ => false + } + case _ => super.canInsertLimitInner(plan) + } + } + + override protected def canInsertLimit(p: LogicalPlan, maxOutputRowsOpt: Option[Int]): Boolean = { + p match { + case WithCTE(plan, _) => this.canInsertLimit(plan, maxOutputRowsOpt) + case _ => super.canInsertLimit(p, maxOutputRowsOpt) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala new file mode 100644 index 00000000000..e44309192a9 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.watchdog + +import org.apache.kyuubi.sql.KyuubiSQLExtensionException + +final class MaxPartitionExceedException( + private val reason: String = "", + private val cause: Throwable = None.orNull) + extends KyuubiSQLExtensionException(reason, cause) + +final class MaxFileSizeExceedException( + private val reason: String = "", + private val cause: Throwable = None.orNull) + extends KyuubiSQLExtensionException(reason, cause) diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala new file mode 100644 index 00000000000..1ed55ebc2fd --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala @@ -0,0 +1,305 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.watchdog + +import org.apache.hadoop.fs.Path +import org.apache.spark.sql.{PruneFileSourcePartitionHelper, SparkSession, Strategy} +import org.apache.spark.sql.catalyst.SQLConfHelper +import org.apache.spark.sql.catalyst.catalog.{CatalogTable, HiveTableRelation} +import org.apache.spark.sql.catalyst.planning.ScanOperation +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.datasources.{CatalogFileIndex, HadoopFsRelation, InMemoryFileIndex, LogicalRelation} +import org.apache.spark.sql.types.StructType + +import org.apache.kyuubi.sql.KyuubiSQLConf + +/** + * Add MaxScanStrategy to avoid scan excessive partitions or files + * 1. Check if scan exceed maxPartition of partitioned table + * 2. Check if scan exceed maxFileSize (calculated by hive table and partition statistics) + * This Strategy Add Planner Strategy after LogicalOptimizer + * @param session + */ +case class MaxScanStrategy(session: SparkSession) + extends Strategy + with SQLConfHelper + with PruneFileSourcePartitionHelper { + override def apply(plan: LogicalPlan): Seq[SparkPlan] = { + val maxScanPartitionsOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_MAX_PARTITIONS) + val maxFileSizeOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE) + if (maxScanPartitionsOpt.isDefined || maxFileSizeOpt.isDefined) { + checkScan(plan, maxScanPartitionsOpt, maxFileSizeOpt) + } + Nil + } + + private def checkScan( + plan: LogicalPlan, + maxScanPartitionsOpt: Option[Int], + maxFileSizeOpt: Option[Long]): Unit = { + plan match { + case ScanOperation(_, _, _, relation: HiveTableRelation) => + if (relation.isPartitioned) { + relation.prunedPartitions match { + case Some(prunedPartitions) => + if (maxScanPartitionsOpt.exists(_ < prunedPartitions.size)) { + throw new MaxPartitionExceedException( + s""" + |SQL job scan hive partition: ${prunedPartitions.size} + |exceed restrict of hive scan maxPartition ${maxScanPartitionsOpt.get} + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + lazy val scanFileSize = prunedPartitions.flatMap(_.stats).map(_.sizeInBytes).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + Some(relation.tableMeta), + prunedPartitions.flatMap(_.storage.locationUri).map(_.toString), + relation.partitionCols.map(_.name)) + } + case _ => + lazy val scanPartitions: Int = session + .sessionState.catalog.externalCatalog.listPartitionNames( + relation.tableMeta.database, + relation.tableMeta.identifier.table).size + if (maxScanPartitionsOpt.exists(_ < scanPartitions)) { + throw new 
MaxPartitionExceedException( + s""" + |Your SQL job scan a whole huge table without any partition filter, + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + + lazy val scanFileSize: BigInt = + relation.tableMeta.stats.map(_.sizeInBytes).getOrElse { + session + .sessionState.catalog.externalCatalog.listPartitions( + relation.tableMeta.database, + relation.tableMeta.identifier.table).flatMap(_.stats).map(_.sizeInBytes).sum + } + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw new MaxFileSizeExceedException( + s""" + |Your SQL job scan a whole huge table without any partition filter, + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + } + } else { + lazy val scanFileSize = relation.tableMeta.stats.map(_.sizeInBytes).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + Some(relation.tableMeta)) + } + } + case ScanOperation( + _, + _, + filters, + relation @ LogicalRelation( + fsRelation @ HadoopFsRelation( + fileIndex: InMemoryFileIndex, + partitionSchema, + _, + _, + _, + _), + _, + _, + _)) => + if (fsRelation.partitionSchema.nonEmpty) { + val (partitionKeyFilters, dataFilter) = + getPartitionKeyFiltersAndDataFilters( + SparkSession.active, + relation, + partitionSchema, + filters, + relation.output) + val prunedPartitions = fileIndex.listFiles( + partitionKeyFilters.toSeq, + dataFilter) + if (maxScanPartitionsOpt.exists(_ < prunedPartitions.size)) { + throw maxPartitionExceedError( + prunedPartitions.size, + maxScanPartitionsOpt.get, + relation.catalogTable, + fileIndex.rootPaths, + fsRelation.partitionSchema) + } + lazy val scanFileSize = prunedPartitions.flatMap(_.files).map(_.getLen).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + relation.catalogTable, + fileIndex.rootPaths.map(_.toString), + fsRelation.partitionSchema.map(_.name)) + } + } else { + lazy val scanFileSize = fileIndex.sizeInBytes + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + relation.catalogTable) + } + } + case ScanOperation( + _, + _, + filters, + logicalRelation @ LogicalRelation( + fsRelation @ HadoopFsRelation( + catalogFileIndex: CatalogFileIndex, + partitionSchema, + _, + _, + _, + _), + _, + _, + _)) => + if (fsRelation.partitionSchema.nonEmpty) { + val (partitionKeyFilters, _) = + getPartitionKeyFiltersAndDataFilters( + SparkSession.active, + logicalRelation, + partitionSchema, + filters, + logicalRelation.output) + + val fileIndex = catalogFileIndex.filterPartitions( + partitionKeyFilters.toSeq) + + lazy val prunedPartitionSize = fileIndex.partitionSpec().partitions.size + if (maxScanPartitionsOpt.exists(_ < prunedPartitionSize)) { + throw maxPartitionExceedError( + prunedPartitionSize, + maxScanPartitionsOpt.get, + logicalRelation.catalogTable, + catalogFileIndex.rootPaths, + fsRelation.partitionSchema) + } + + lazy val scanFileSize = fileIndex + 
.listFiles(Nil, Nil).flatMap(_.files).map(_.getLen).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + logicalRelation.catalogTable, + catalogFileIndex.rootPaths.map(_.toString), + fsRelation.partitionSchema.map(_.name)) + } + } else { + lazy val scanFileSize = catalogFileIndex.sizeInBytes + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + logicalRelation.catalogTable) + } + } + case _ => + } + } + + def maxPartitionExceedError( + prunedPartitionSize: Int, + maxPartitionSize: Int, + tableMeta: Option[CatalogTable], + rootPaths: Seq[Path], + partitionSchema: StructType): Throwable = { + val truncatedPaths = + if (rootPaths.length > 5) { + rootPaths.slice(0, 5).mkString(",") + """... """ + (rootPaths.length - 5) + " more paths" + } else { + rootPaths.mkString(",") + } + + new MaxPartitionExceedException( + s""" + |SQL job scan data source partition: $prunedPartitionSize + |exceed restrict of data source scan maxPartition $maxPartitionSize + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |RootPaths: $truncatedPaths + |Partition Structure: ${partitionSchema.map(_.name).mkString(", ")} + |""".stripMargin) + } + + private def partTableMaxFileExceedError( + scanFileSize: Number, + maxFileSize: Long, + tableMeta: Option[CatalogTable], + rootPaths: Seq[String], + partitions: Seq[String]): Throwable = { + val truncatedPaths = + if (rootPaths.length > 5) { + rootPaths.slice(0, 5).mkString(",") + """... """ + (rootPaths.length - 5) + " more paths" + } else { + rootPaths.mkString(",") + } + + new MaxFileSizeExceedException( + s""" + |SQL job scan file size in bytes: $scanFileSize + |exceed restrict of table scan maxFileSize $maxFileSize + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |RootPaths: $truncatedPaths + |Partition Structure: ${partitions.mkString(", ")} + |""".stripMargin) + } + + private def nonPartTableMaxFileExceedError( + scanFileSize: Number, + maxFileSize: Long, + tableMeta: Option[CatalogTable]): Throwable = { + new MaxFileSizeExceedException( + s""" + |SQL job scan file size in bytes: $scanFileSize + |exceed restrict of table scan maxFileSize $maxFileSize + |detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |Location: ${tableMeta.map(_.location).getOrElse("")} + |""".stripMargin) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala new file mode 100644 index 00000000000..b3f98ec6d7f --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWriting.scala @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.catalog.CatalogTable +import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, Expression, NullsLast, SortOrder} +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable + +import org.apache.kyuubi.sql.{KyuubiSQLConf, KyuubiSQLExtensionException} + +trait InsertZorderHelper33 extends Rule[LogicalPlan] with ZorderBuilder { + private val KYUUBI_ZORDER_ENABLED = "kyuubi.zorder.enabled" + private val KYUUBI_ZORDER_COLS = "kyuubi.zorder.cols" + + def isZorderEnabled(props: Map[String, String]): Boolean = { + props.contains(KYUUBI_ZORDER_ENABLED) && + "true".equalsIgnoreCase(props(KYUUBI_ZORDER_ENABLED)) && + props.contains(KYUUBI_ZORDER_COLS) + } + + def getZorderColumns(props: Map[String, String]): Seq[String] = { + val cols = props.get(KYUUBI_ZORDER_COLS) + assert(cols.isDefined) + cols.get.split(",").map(_.trim) + } + + def canInsertZorder(query: LogicalPlan): Boolean = query match { + case Project(_, child) => canInsertZorder(child) + // TODO: actually, we can force zorder even if existed some shuffle + case _: Sort => false + case _: RepartitionByExpression => false + case _: Repartition => false + case _ => true + } + + def insertZorder( + catalogTable: CatalogTable, + plan: LogicalPlan, + dynamicPartitionColumns: Seq[Attribute]): LogicalPlan = { + if (!canInsertZorder(plan)) { + return plan + } + val cols = getZorderColumns(catalogTable.properties) + val resolver = session.sessionState.conf.resolver + val output = plan.output + val bound = cols.flatMap(col => output.find(attr => resolver(attr.name, col))) + if (bound.size < cols.size) { + logWarning(s"target table does not contain all zorder cols: ${cols.mkString(",")}, " + + s"please check your table properties ${KYUUBI_ZORDER_COLS}.") + plan + } else { + if (conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED) && + conf.getConf(KyuubiSQLConf.REBALANCE_BEFORE_ZORDER)) { + throw new KyuubiSQLExtensionException(s"${KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED.key} " + + s"and ${KyuubiSQLConf.REBALANCE_BEFORE_ZORDER.key} can not be enabled together.") + } + if (conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED) && + dynamicPartitionColumns.nonEmpty) { + logWarning(s"Dynamic partition insertion with global sort may produce small files.") + } + + val zorderExpr = + if (bound.length == 1) { + bound + } else if (conf.getConf(KyuubiSQLConf.ZORDER_USING_ORIGINAL_ORDERING_ENABLED)) { + bound.asInstanceOf[Seq[Expression]] + } else { + buildZorder(bound) :: Nil + } + val (global, orderExprs, child) = + if (conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED)) { + (true, zorderExpr, plan) + } else if 
(conf.getConf(KyuubiSQLConf.REBALANCE_BEFORE_ZORDER)) { + val rebalanceExpr = + if (dynamicPartitionColumns.isEmpty) { + // static partition insert + bound + } else if (conf.getConf(KyuubiSQLConf.REBALANCE_ZORDER_COLUMNS_ENABLED)) { + // improve data compression ratio + dynamicPartitionColumns.asInstanceOf[Seq[Expression]] ++ bound + } else { + dynamicPartitionColumns.asInstanceOf[Seq[Expression]] + } + // for dynamic partition insert, Spark always sort the partition columns, + // so here we sort partition columns + zorder. + val rebalance = + if (dynamicPartitionColumns.nonEmpty && + conf.getConf(KyuubiSQLConf.TWO_PHASE_REBALANCE_BEFORE_ZORDER)) { + // improve compression ratio + RebalancePartitions( + rebalanceExpr, + RebalancePartitions(dynamicPartitionColumns, plan)) + } else { + RebalancePartitions(rebalanceExpr, plan) + } + (false, dynamicPartitionColumns.asInstanceOf[Seq[Expression]] ++ zorderExpr, rebalance) + } else { + (false, zorderExpr, plan) + } + val order = orderExprs.map { expr => + SortOrder(expr, Ascending, NullsLast, Seq.empty) + } + Sort(order, global, child) + } + } + + override def buildZorder(children: Seq[Expression]): ZorderBase = Zorder(children) + + def session: SparkSession + def applyInternal(plan: LogicalPlan): LogicalPlan + + final override def apply(plan: LogicalPlan): LogicalPlan = { + if (conf.getConf(KyuubiSQLConf.INSERT_ZORDER_BEFORE_WRITING)) { + applyInternal(plan) + } else { + plan + } + } +} + +case class InsertZorderBeforeWritingDatasource33(session: SparkSession) + extends InsertZorderHelper33 { + override def applyInternal(plan: LogicalPlan): LogicalPlan = plan match { + case insert: InsertIntoHadoopFsRelationCommand + if insert.query.resolved && + insert.bucketSpec.isEmpty && insert.catalogTable.isDefined && + isZorderEnabled(insert.catalogTable.get.properties) => + val dynamicPartition = + insert.partitionColumns.filterNot(attr => insert.staticPartitions.contains(attr.name)) + val newQuery = insertZorder(insert.catalogTable.get, insert.query, dynamicPartition) + if (newQuery.eq(insert.query)) { + insert + } else { + insert.copy(query = newQuery) + } + + case _ => plan + } +} + +case class InsertZorderBeforeWritingHive33(session: SparkSession) + extends InsertZorderHelper33 { + override def applyInternal(plan: LogicalPlan): LogicalPlan = plan match { + case insert: InsertIntoHiveTable + if insert.query.resolved && + insert.table.bucketSpec.isEmpty && isZorderEnabled(insert.table.properties) => + val dynamicPartition = insert.partition.filter(_._2.isEmpty).keys + .flatMap(name => insert.query.output.find(_.name == name)).toSeq + val newQuery = insertZorder(insert.table, insert.query, dynamicPartition) + if (newQuery.eq(insert.query)) { + insert + } else { + insert.copy(query = newQuery) + } + + case _ => plan + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWritingBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWritingBase.scala new file mode 100644 index 00000000000..2c59d148e98 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/InsertZorderBeforeWritingBase.scala @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
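As a usage note, the rules above are driven entirely by table properties. Assuming a SparkSession with the Kyuubi SQL extension installed, the properties could be set as sketched below; the table and column names are illustrative.

import org.apache.spark.sql.SparkSession

object ZorderTablePropertiesExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()
    spark.sql("CREATE TABLE IF NOT EXISTS t (c1 INT, c2 INT, v STRING) USING parquet")
    // kyuubi.zorder.enabled / kyuubi.zorder.cols are the properties the rule checks.
    spark.sql(
      "ALTER TABLE t SET TBLPROPERTIES (" +
        "'kyuubi.zorder.enabled' = 'true', 'kyuubi.zorder.cols' = 'c1,c2')")
    // Subsequent inserts into `t` become eligible for the injected zorder sort,
    // provided spark.sql.extensions includes the Kyuubi extension.
    spark.stop()
  }
}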
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import java.util.Locale + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.catalog.CatalogTable +import org.apache.spark.sql.catalyst.expressions.{Ascending, Expression, NullsLast, SortOrder} +import org.apache.spark.sql.catalyst.plans.logical._ +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable + +import org.apache.kyuubi.sql.KyuubiSQLConf + +/** + * TODO: shall we forbid zorder if it's dynamic partition inserts ? + * Insert zorder before writing datasource if the target table properties has zorder properties + */ +abstract class InsertZorderBeforeWritingDatasourceBase + extends InsertZorderHelper { + override def applyInternal(plan: LogicalPlan): LogicalPlan = plan match { + case insert: InsertIntoHadoopFsRelationCommand + if insert.query.resolved && insert.bucketSpec.isEmpty && insert.catalogTable.isDefined && + isZorderEnabled(insert.catalogTable.get.properties) => + val newQuery = insertZorder(insert.catalogTable.get, insert.query) + if (newQuery.eq(insert.query)) { + insert + } else { + insert.copy(query = newQuery) + } + case _ => plan + } +} + +/** + * TODO: shall we forbid zorder if it's dynamic partition inserts ? 
+ * Insert zorder before writing hive if the target table properties has zorder properties + */ +abstract class InsertZorderBeforeWritingHiveBase + extends InsertZorderHelper { + override def applyInternal(plan: LogicalPlan): LogicalPlan = plan match { + case insert: InsertIntoHiveTable + if insert.query.resolved && insert.table.bucketSpec.isEmpty && + isZorderEnabled(insert.table.properties) => + val newQuery = insertZorder(insert.table, insert.query) + if (newQuery.eq(insert.query)) { + insert + } else { + insert.copy(query = newQuery) + } + case _ => plan + } +} + +trait ZorderBuilder { + def buildZorder(children: Seq[Expression]): ZorderBase +} + +trait InsertZorderHelper extends Rule[LogicalPlan] with ZorderBuilder { + private val KYUUBI_ZORDER_ENABLED = "kyuubi.zorder.enabled" + private val KYUUBI_ZORDER_COLS = "kyuubi.zorder.cols" + + def isZorderEnabled(props: Map[String, String]): Boolean = { + props.contains(KYUUBI_ZORDER_ENABLED) && + "true".equalsIgnoreCase(props(KYUUBI_ZORDER_ENABLED)) && + props.contains(KYUUBI_ZORDER_COLS) + } + + def getZorderColumns(props: Map[String, String]): Seq[String] = { + val cols = props.get(KYUUBI_ZORDER_COLS) + assert(cols.isDefined) + cols.get.split(",").map(_.trim.toLowerCase(Locale.ROOT)) + } + + def canInsertZorder(query: LogicalPlan): Boolean = query match { + case Project(_, child) => canInsertZorder(child) + // TODO: actually, we can force zorder even if existed some shuffle + case _: Sort => false + case _: RepartitionByExpression => false + case _: Repartition => false + case _ => true + } + + def insertZorder(catalogTable: CatalogTable, plan: LogicalPlan): LogicalPlan = { + if (!canInsertZorder(plan)) { + return plan + } + val cols = getZorderColumns(catalogTable.properties) + val attrs = plan.output.map(attr => (attr.name, attr)).toMap + if (cols.exists(!attrs.contains(_))) { + logWarning(s"target table does not contain all zorder cols: ${cols.mkString(",")}, " + + s"please check your table properties ${KYUUBI_ZORDER_COLS}.") + plan + } else { + val bound = cols.map(attrs(_)) + val orderExpr = + if (bound.length == 1) { + bound.head + } else { + buildZorder(bound) + } + // TODO: We can do rebalance partitions before local sort of zorder after SPARK 3.3 + // see https://github.com/apache/spark/pull/34542 + Sort( + SortOrder(orderExpr, Ascending, NullsLast, Seq.empty) :: Nil, + conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED), + plan) + } + } + + def applyInternal(plan: LogicalPlan): LogicalPlan + + final override def apply(plan: LogicalPlan): LogicalPlan = { + if (conf.getConf(KyuubiSQLConf.INSERT_ZORDER_BEFORE_WRITING)) { + applyInternal(plan) + } else { + plan + } + } +} + +/** + * TODO: shall we forbid zorder if it's dynamic partition inserts ? + * Insert zorder before writing datasource if the target table properties has zorder properties + */ +case class InsertZorderBeforeWritingDatasource(session: SparkSession) + extends InsertZorderBeforeWritingDatasourceBase { + override def buildZorder(children: Seq[Expression]): ZorderBase = Zorder(children) +} + +/** + * TODO: shall we forbid zorder if it's dynamic partition inserts ? 
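The helper above only injects a sort when every configured zorder column resolves against the plan output (case-insensitively in this base variant); otherwise it logs a warning and leaves the plan untouched. A dependency-free sketch of that check, with illustrative names:

import java.util.Locale

object ZorderColumnCheckSketch {
  // Returns the normalized zorder columns if all of them exist in the plan output.
  def resolveZorderCols(configured: String, outputCols: Seq[String]): Option[Seq[String]] = {
    val wanted = configured.split(",").map(_.trim.toLowerCase(Locale.ROOT)).toSeq
    val available = outputCols.map(_.toLowerCase(Locale.ROOT)).toSet
    if (wanted.forall(available.contains)) Some(wanted) else None
  }

  def main(args: Array[String]): Unit = {
    println(resolveZorderCols("C1, c2", Seq("c1", "c2", "v"))) // Some(List(c1, c2))
    println(resolveZorderCols("c1,c3", Seq("c1", "c2", "v")))  // None: the rule would warn and skip
  }
}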
+ * Insert zorder before writing hive if the target table properties has zorder properties + */ +case class InsertZorderBeforeWritingHive(session: SparkSession) + extends InsertZorderBeforeWritingHiveBase { + override def buildZorder(children: Seq[Expression]): ZorderBase = Zorder(children) +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommandBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommandBase.scala new file mode 100644 index 00000000000..21d1cf2a25b --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderCommandBase.scala @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import org.apache.spark.sql.{Row, SparkSession} +import org.apache.spark.sql.catalyst.catalog.CatalogTable +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.command.DataWritingCommand +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable + +import org.apache.kyuubi.sql.KyuubiSQLExtensionException + +/** + * A runnable command for zorder, we delegate to real command to execute + */ +abstract class OptimizeZorderCommandBase extends DataWritingCommand { + def catalogTable: CatalogTable + + override def outputColumnNames: Seq[String] = query.output.map(_.name) + + private def isHiveTable: Boolean = { + catalogTable.provider.isEmpty || + (catalogTable.provider.isDefined && "hive".equalsIgnoreCase(catalogTable.provider.get)) + } + + private def getWritingCommand(session: SparkSession): DataWritingCommand = { + // TODO: Support convert hive relation to datasource relation, can see + // [[org.apache.spark.sql.hive.RelationConversions]] + InsertIntoHiveTable( + catalogTable, + catalogTable.partitionColumnNames.map(p => (p, None)).toMap, + query, + overwrite = true, + ifPartitionNotExists = false, + outputColumnNames) + } + + override def run(session: SparkSession, child: SparkPlan): Seq[Row] = { + // TODO: Support datasource relation + // TODO: Support read and insert overwrite the same table for some table format + if (!isHiveTable) { + throw new KyuubiSQLExtensionException("only support hive table") + } + + val command = getWritingCommand(session) + command.run(session, child) + DataWritingCommand.propogateMetrics(session.sparkContext, command, metrics) + Seq.empty + } +} + +/** + * A runnable command for zorder, we delegate to real command to execute + */ +case class OptimizeZorderCommand( + catalogTable: CatalogTable, + query: LogicalPlan) + extends OptimizeZorderCommandBase { + protected def 
withNewChildInternal(newChild: LogicalPlan): LogicalPlan = { + copy(query = newChild) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala new file mode 100644 index 00000000000..895f9e24be3 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import org.apache.spark.sql.catalyst.expressions.Attribute +import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UnaryNode} + +/** + * A zorder statement that holds the table identifier and query we parsed from SQL. + * The Analyzer should convert this plan into a concrete command. + */ +case class OptimizeZorderStatement( + tableIdentifier: Seq[String], + query: LogicalPlan) extends UnaryNode { + override def child: LogicalPlan = query + override def output: Seq[Attribute] = child.output + protected def withNewChildInternal(newChild: LogicalPlan): LogicalPlan = + copy(query = newChild) +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala new file mode 100644 index 00000000000..9f735caa7a7 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql.zorder + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.catalog.{CatalogTable, HiveTableRelation} +import org.apache.spark.sql.catalyst.expressions.AttributeSet +import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, SubqueryAlias} +import org.apache.spark.sql.catalyst.rules.Rule + +import org.apache.kyuubi.sql.KyuubiSQLExtensionException + +/** + * Resolve `OptimizeZorderStatement` to `OptimizeZorderCommand` + */ +abstract class ResolveZorderBase extends Rule[LogicalPlan] { + def session: SparkSession + def buildOptimizeZorderCommand( + catalogTable: CatalogTable, + query: LogicalPlan): OptimizeZorderCommandBase + + protected def checkQueryAllowed(query: LogicalPlan): Unit = query foreach { + case Filter(condition, SubqueryAlias(_, tableRelation: HiveTableRelation)) => + if (tableRelation.partitionCols.isEmpty) { + throw new KyuubiSQLExtensionException("Filters are only supported for partitioned table") + } + + val partitionKeyIds = AttributeSet(tableRelation.partitionCols) + if (condition.references.isEmpty || !condition.references.subsetOf(partitionKeyIds)) { + throw new KyuubiSQLExtensionException("Only partition column filters are allowed") + } + + case _ => + } + + protected def getTableIdentifier(tableIdent: Seq[String]): TableIdentifier = tableIdent match { + case Seq(tbl) => TableIdentifier.apply(tbl) + case Seq(db, tbl) => TableIdentifier.apply(tbl, Some(db)) + case _ => throw new KyuubiSQLExtensionException( + "only support session catalog table, please use db.table instead") + } + + override def apply(plan: LogicalPlan): LogicalPlan = plan match { + case statement: OptimizeZorderStatement if statement.query.resolved => + checkQueryAllowed(statement.query) + val tableIdentifier = getTableIdentifier(statement.tableIdentifier) + val catalogTable = session.sessionState.catalog.getTableMetadata(tableIdentifier) + buildOptimizeZorderCommand(catalogTable, statement.query) + + case _ => plan + } +} + +/** + * Resolve `OptimizeZorderStatement` to `OptimizeZorderCommand` + */ +case class ResolveZorder(session: SparkSession) extends ResolveZorderBase { + override def buildOptimizeZorderCommand( + catalogTable: CatalogTable, + query: LogicalPlan): OptimizeZorderCommandBase = { + OptimizeZorderCommand(catalogTable, query) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBase.scala new file mode 100644 index 00000000000..e4d98ccbe84 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBase.scala @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
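Tying the pieces together, a hedged usage sketch: assuming the extension's OPTIMIZE ... ZORDER BY syntax (the parser is defined elsewhere in this module), the statement is parsed into OptimizeZorderStatement, resolved by ResolveZorder into OptimizeZorderCommand, and only filters on partition columns pass checkQueryAllowed. Table and column names below are illustrative.

import org.apache.spark.sql.SparkSession

object OptimizeZorderUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()
    // Rewrites the selected partition, sorted by the interleaved (c1, c2) z-value.
    spark.sql("OPTIMIZE db.t WHERE p_date = '2023-01-01' ZORDER BY c1, c2")
    // A filter on a non-partition column would be rejected with
    // "Only partition column filters are allowed".
    spark.stop()
  }
}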
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.analysis.TypeCheckResult +import org.apache.spark.sql.catalyst.expressions.Expression +import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode, FalseLiteral} +import org.apache.spark.sql.catalyst.expressions.codegen.Block._ +import org.apache.spark.sql.types.{BinaryType, DataType} + +import org.apache.kyuubi.sql.KyuubiSQLExtensionException + +abstract class ZorderBase extends Expression { + override def foldable: Boolean = children.forall(_.foldable) + override def nullable: Boolean = false + override def dataType: DataType = BinaryType + override def prettyName: String = "zorder" + + override def checkInputDataTypes(): TypeCheckResult = { + try { + defaultNullValues + TypeCheckResult.TypeCheckSuccess + } catch { + case e: KyuubiSQLExtensionException => + TypeCheckResult.TypeCheckFailure(e.getMessage) + } + } + + @transient + private[this] lazy val defaultNullValues: Array[Any] = + children.map(_.dataType) + .map(ZorderBytesUtils.defaultValue) + .toArray + + override def eval(input: InternalRow): Any = { + val childrenValues = children.zipWithIndex.map { + case (child: Expression, index) => + val v = child.eval(input) + if (v == null) { + defaultNullValues(index) + } else { + v + } + } + ZorderBytesUtils.interleaveBits(childrenValues.toArray) + } + + override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { + val evals = children.map(_.genCode(ctx)) + val defaultValues = ctx.addReferenceObj("defaultValues", defaultNullValues) + val values = ctx.freshName("values") + val util = ZorderBytesUtils.getClass.getName.stripSuffix("$") + val inputs = evals.zipWithIndex.map { + case (eval, index) => + s""" + |${eval.code} + |if (${eval.isNull}) { + | $values[$index] = $defaultValues[$index]; + |} else { + | $values[$index] = ${eval.value}; + |} + |""".stripMargin + } + ev.copy( + code = + code""" + |byte[] ${ev.value} = null; + |Object[] $values = new Object[${evals.length}]; + |${inputs.mkString("\n")} + |${ev.value} = $util.interleaveBits($values); + |""".stripMargin, + isNull = FalseLiteral) + } +} + +case class Zorder(children: Seq[Expression]) extends ZorderBase { + protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression = + copy(children = newChildren) +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBytesUtils.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBytesUtils.scala new file mode 100644 index 00000000000..d249f1dc32f --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/kyuubi/sql/zorder/ZorderBytesUtils.scala @@ -0,0 +1,517 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
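For intuition about what the interleaving in eval/doGenCode computes, a naive two-way bit interleave is sketched below. It is a reference-only illustration, not part of this patch, and is O(bits) per value pair rather than the byte-wise approach used in ZorderBytesUtils.

object NaiveInterleaveSketch {
  // Interleave the bits of two ints: x takes the even bit positions, y the odd ones.
  def interleave2(x: Int, y: Int): Long = {
    var z = 0L
    var i = 0
    while (i < 32) {
      z |= ((x >> i) & 1L) << (2 * i)
      z |= ((y >> i) & 1L) << (2 * i + 1)
      i += 1
    }
    z
  }

  def main(args: Array[String]): Unit = {
    // 3 = 0b011, 5 = 0b101, interleaved -> 0b100111
    println(interleave2(3, 5).toBinaryString) // 100111
  }
}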
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.sql.zorder + +import java.lang.{Double => jDouble, Float => jFloat} + +import org.apache.spark.sql.types._ +import org.apache.spark.unsafe.types.UTF8String + +import org.apache.kyuubi.sql.KyuubiSQLExtensionException + +object ZorderBytesUtils { + final private val BIT_8_MASK = 1 << 7 + final private val BIT_16_MASK = 1 << 15 + final private val BIT_32_MASK = 1 << 31 + final private val BIT_64_MASK = 1L << 63 + + def interleaveBits(inputs: Array[Any]): Array[Byte] = { + inputs.length match { + // it's a more fast approach, use O(8 * 8) + // can see http://graphics.stanford.edu/~seander/bithacks.html#InterleaveTableObvious + case 1 => longToByte(toLong(inputs(0))) + case 2 => interleave2Longs(toLong(inputs(0)), toLong(inputs(1))) + case 3 => interleave3Longs(toLong(inputs(0)), toLong(inputs(1)), toLong(inputs(2))) + case 4 => + interleave4Longs(toLong(inputs(0)), toLong(inputs(1)), toLong(inputs(2)), toLong(inputs(3))) + case 5 => interleave5Longs( + toLong(inputs(0)), + toLong(inputs(1)), + toLong(inputs(2)), + toLong(inputs(3)), + toLong(inputs(4))) + case 6 => interleave6Longs( + toLong(inputs(0)), + toLong(inputs(1)), + toLong(inputs(2)), + toLong(inputs(3)), + toLong(inputs(4)), + toLong(inputs(5))) + case 7 => interleave7Longs( + toLong(inputs(0)), + toLong(inputs(1)), + toLong(inputs(2)), + toLong(inputs(3)), + toLong(inputs(4)), + toLong(inputs(5)), + toLong(inputs(6))) + case 8 => interleave8Longs( + toLong(inputs(0)), + toLong(inputs(1)), + toLong(inputs(2)), + toLong(inputs(3)), + toLong(inputs(4)), + toLong(inputs(5)), + toLong(inputs(6)), + toLong(inputs(7))) + + case _ => + // it's the default approach, use O(64 * n), n is the length of inputs + interleaveBitsDefault(inputs.map(toByteArray)) + } + } + + private def interleave2Longs(l1: Long, l2: Long): Array[Byte] = { + // output 8 * 16 bits + val result = new Array[Byte](16) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toShort + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toShort + + var z = 0 + var j = 0 + while (j < 8) { + val x_masked = tmp1 & (1 << j) + val y_masked = tmp2 & (1 << j) + z |= (x_masked << j) + z |= (y_masked << (j + 1)) + j = j + 1 + } + result((7 - i) * 2 + 1) = (z & 0xFF).toByte + result((7 - i) * 2) = ((z >> 8) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave3Longs(l1: Long, l2: Long, l3: Long): Array[Byte] = { + // output 8 * 24 bits + val result = new Array[Byte](24) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toInt + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toInt + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toInt + + var z = 0 + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + z |= (r1_mask << (2 * j)) | (r2_mask << (2 * j + 1)) | (r3_mask << (2 * j + 2)) + j = j + 1 + } + result((7 - i) * 3 + 2) = (z & 0xFF).toByte + result((7 - i) * 3 + 1) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 3) = ((z >> 16) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave4Longs(l1: Long, l2: Long, l3: Long, l4: Long): Array[Byte] = 
{ + // output 8 * 32 bits + val result = new Array[Byte](32) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toInt + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toInt + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toInt + val tmp4 = ((l4 >> (i * 8)) & 0xFF).toInt + + var z = 0 + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + val r4_mask = tmp4 & (1 << j) + z |= (r1_mask << (3 * j)) | (r2_mask << (3 * j + 1)) | (r3_mask << (3 * j + 2)) | + (r4_mask << (3 * j + 3)) + j = j + 1 + } + result((7 - i) * 4 + 3) = (z & 0xFF).toByte + result((7 - i) * 4 + 2) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 4 + 1) = ((z >> 16) & 0xFF).toByte + result((7 - i) * 4) = ((z >> 24) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave5Longs( + l1: Long, + l2: Long, + l3: Long, + l4: Long, + l5: Long): Array[Byte] = { + // output 8 * 40 bits + val result = new Array[Byte](40) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toLong + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toLong + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toLong + val tmp4 = ((l4 >> (i * 8)) & 0xFF).toLong + val tmp5 = ((l5 >> (i * 8)) & 0xFF).toLong + + var z = 0L + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + val r4_mask = tmp4 & (1 << j) + val r5_mask = tmp5 & (1 << j) + z |= (r1_mask << (4 * j)) | (r2_mask << (4 * j + 1)) | (r3_mask << (4 * j + 2)) | + (r4_mask << (4 * j + 3)) | (r5_mask << (4 * j + 4)) + j = j + 1 + } + result((7 - i) * 5 + 4) = (z & 0xFF).toByte + result((7 - i) * 5 + 3) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 5 + 2) = ((z >> 16) & 0xFF).toByte + result((7 - i) * 5 + 1) = ((z >> 24) & 0xFF).toByte + result((7 - i) * 5) = ((z >> 32) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave6Longs( + l1: Long, + l2: Long, + l3: Long, + l4: Long, + l5: Long, + l6: Long): Array[Byte] = { + // output 8 * 48 bits + val result = new Array[Byte](48) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toLong + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toLong + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toLong + val tmp4 = ((l4 >> (i * 8)) & 0xFF).toLong + val tmp5 = ((l5 >> (i * 8)) & 0xFF).toLong + val tmp6 = ((l6 >> (i * 8)) & 0xFF).toLong + + var z = 0L + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + val r4_mask = tmp4 & (1 << j) + val r5_mask = tmp5 & (1 << j) + val r6_mask = tmp6 & (1 << j) + z |= (r1_mask << (5 * j)) | (r2_mask << (5 * j + 1)) | (r3_mask << (5 * j + 2)) | + (r4_mask << (5 * j + 3)) | (r5_mask << (5 * j + 4)) | (r6_mask << (5 * j + 5)) + j = j + 1 + } + result((7 - i) * 6 + 5) = (z & 0xFF).toByte + result((7 - i) * 6 + 4) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 6 + 3) = ((z >> 16) & 0xFF).toByte + result((7 - i) * 6 + 2) = ((z >> 24) & 0xFF).toByte + result((7 - i) * 6 + 1) = ((z >> 32) & 0xFF).toByte + result((7 - i) * 6) = ((z >> 40) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave7Longs( + l1: Long, + l2: Long, + l3: Long, + l4: Long, + l5: Long, + l6: Long, + l7: Long): Array[Byte] = { + // output 8 * 56 bits + val result = new Array[Byte](56) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toLong + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toLong + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toLong + val tmp4 = ((l4 >> (i * 8)) & 0xFF).toLong + val tmp5 = ((l5 >> (i * 8)) & 
0xFF).toLong + val tmp6 = ((l6 >> (i * 8)) & 0xFF).toLong + val tmp7 = ((l7 >> (i * 8)) & 0xFF).toLong + + var z = 0L + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + val r4_mask = tmp4 & (1 << j) + val r5_mask = tmp5 & (1 << j) + val r6_mask = tmp6 & (1 << j) + val r7_mask = tmp7 & (1 << j) + z |= (r1_mask << (6 * j)) | (r2_mask << (6 * j + 1)) | (r3_mask << (6 * j + 2)) | + (r4_mask << (6 * j + 3)) | (r5_mask << (6 * j + 4)) | (r6_mask << (6 * j + 5)) | + (r7_mask << (6 * j + 6)) + j = j + 1 + } + result((7 - i) * 7 + 6) = (z & 0xFF).toByte + result((7 - i) * 7 + 5) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 7 + 4) = ((z >> 16) & 0xFF).toByte + result((7 - i) * 7 + 3) = ((z >> 24) & 0xFF).toByte + result((7 - i) * 7 + 2) = ((z >> 32) & 0xFF).toByte + result((7 - i) * 7 + 1) = ((z >> 40) & 0xFF).toByte + result((7 - i) * 7) = ((z >> 48) & 0xFF).toByte + i = i + 1 + } + result + } + + private def interleave8Longs( + l1: Long, + l2: Long, + l3: Long, + l4: Long, + l5: Long, + l6: Long, + l7: Long, + l8: Long): Array[Byte] = { + // output 8 * 64 bits + val result = new Array[Byte](64) + var i = 0 + while (i < 8) { + val tmp1 = ((l1 >> (i * 8)) & 0xFF).toLong + val tmp2 = ((l2 >> (i * 8)) & 0xFF).toLong + val tmp3 = ((l3 >> (i * 8)) & 0xFF).toLong + val tmp4 = ((l4 >> (i * 8)) & 0xFF).toLong + val tmp5 = ((l5 >> (i * 8)) & 0xFF).toLong + val tmp6 = ((l6 >> (i * 8)) & 0xFF).toLong + val tmp7 = ((l7 >> (i * 8)) & 0xFF).toLong + val tmp8 = ((l8 >> (i * 8)) & 0xFF).toLong + + var z = 0L + var j = 0 + while (j < 8) { + val r1_mask = tmp1 & (1 << j) + val r2_mask = tmp2 & (1 << j) + val r3_mask = tmp3 & (1 << j) + val r4_mask = tmp4 & (1 << j) + val r5_mask = tmp5 & (1 << j) + val r6_mask = tmp6 & (1 << j) + val r7_mask = tmp7 & (1 << j) + val r8_mask = tmp8 & (1 << j) + z |= (r1_mask << (7 * j)) | (r2_mask << (7 * j + 1)) | (r3_mask << (7 * j + 2)) | + (r4_mask << (7 * j + 3)) | (r5_mask << (7 * j + 4)) | (r6_mask << (7 * j + 5)) | + (r7_mask << (7 * j + 6)) | (r8_mask << (7 * j + 7)) + j = j + 1 + } + result((7 - i) * 8 + 7) = (z & 0xFF).toByte + result((7 - i) * 8 + 6) = ((z >> 8) & 0xFF).toByte + result((7 - i) * 8 + 5) = ((z >> 16) & 0xFF).toByte + result((7 - i) * 8 + 4) = ((z >> 24) & 0xFF).toByte + result((7 - i) * 8 + 3) = ((z >> 32) & 0xFF).toByte + result((7 - i) * 8 + 2) = ((z >> 40) & 0xFF).toByte + result((7 - i) * 8 + 1) = ((z >> 48) & 0xFF).toByte + result((7 - i) * 8) = ((z >> 56) & 0xFF).toByte + i = i + 1 + } + result + } + + def interleaveBitsDefault(arrays: Array[Array[Byte]]): Array[Byte] = { + var totalLength = 0 + var maxLength = 0 + arrays.foreach { array => + totalLength += array.length + maxLength = maxLength.max(array.length * 8) + } + val result = new Array[Byte](totalLength) + var resultBit = 0 + + var bit = 0 + while (bit < maxLength) { + val bytePos = bit / 8 + val bitPos = bit % 8 + + for (arr <- arrays) { + val len = arr.length + if (bytePos < len) { + val resultBytePos = totalLength - 1 - resultBit / 8 + val resultBitPos = resultBit % 8 + result(resultBytePos) = + updatePos(result(resultBytePos), resultBitPos, arr(len - 1 - bytePos), bitPos) + resultBit += 1 + } + } + bit += 1 + } + result + } + + def updatePos(a: Byte, apos: Int, b: Byte, bpos: Int): Byte = { + var temp = (b & (1 << bpos)).toByte + if (apos > bpos) { + temp = (temp << (apos - bpos)).toByte + } else if (apos < bpos) { + temp = (temp >> (bpos - apos)).toByte + } + val atemp = (a & (1 << apos)).toByte + if (atemp == 
temp) { + return a + } + (a ^ (1 << apos)).toByte + } + + def toLong(a: Any): Long = { + a match { + case b: Boolean => (if (b) 1 else 0).toLong ^ BIT_64_MASK + case b: Byte => b.toLong ^ BIT_64_MASK + case s: Short => s.toLong ^ BIT_64_MASK + case i: Int => i.toLong ^ BIT_64_MASK + case l: Long => l ^ BIT_64_MASK + case f: Float => java.lang.Float.floatToRawIntBits(f).toLong ^ BIT_64_MASK + case d: Double => java.lang.Double.doubleToRawLongBits(d) ^ BIT_64_MASK + case str: UTF8String => str.getPrefix + case dec: Decimal => dec.toLong ^ BIT_64_MASK + case other: Any => + throw new KyuubiSQLExtensionException("Unsupported z-order type: " + other.getClass) + } + } + + def toByteArray(a: Any): Array[Byte] = { + a match { + case bo: Boolean => + booleanToByte(bo) + case b: Byte => + byteToByte(b) + case s: Short => + shortToByte(s) + case i: Int => + intToByte(i) + case l: Long => + longToByte(l) + case f: Float => + floatToByte(f) + case d: Double => + doubleToByte(d) + case str: UTF8String => + // truncate or padding str to 8 byte + paddingTo8Byte(str.getBytes) + case dec: Decimal => + longToByte(dec.toLong) + case other: Any => + throw new KyuubiSQLExtensionException("Unsupported z-order type: " + other.getClass) + } + } + + def booleanToByte(a: Boolean): Array[Byte] = { + if (a) { + byteToByte(1.toByte) + } else { + byteToByte(0.toByte) + } + } + + def byteToByte(a: Byte): Array[Byte] = { + val tmp = (a ^ BIT_8_MASK).toByte + Array(tmp) + } + + def shortToByte(a: Short): Array[Byte] = { + val tmp = a ^ BIT_16_MASK + Array(((tmp >> 8) & 0xFF).toByte, (tmp & 0xFF).toByte) + } + + def intToByte(a: Int): Array[Byte] = { + val result = new Array[Byte](4) + var i = 0 + val tmp = a ^ BIT_32_MASK + while (i <= 3) { + val offset = i * 8 + result(3 - i) = ((tmp >> offset) & 0xFF).toByte + i += 1 + } + result + } + + def longToByte(a: Long): Array[Byte] = { + val result = new Array[Byte](8) + var i = 0 + val tmp = a ^ BIT_64_MASK + while (i <= 7) { + val offset = i * 8 + result(7 - i) = ((tmp >> offset) & 0xFF).toByte + i += 1 + } + result + } + + def floatToByte(a: Float): Array[Byte] = { + val fi = jFloat.floatToRawIntBits(a) + intToByte(fi) + } + + def doubleToByte(a: Double): Array[Byte] = { + val dl = jDouble.doubleToRawLongBits(a) + longToByte(dl) + } + + def paddingTo8Byte(a: Array[Byte]): Array[Byte] = { + val len = a.length + if (len == 8) { + a + } else if (len > 8) { + val result = new Array[Byte](8) + System.arraycopy(a, 0, result, 0, 8) + result + } else { + val result = new Array[Byte](8) + System.arraycopy(a, 0, result, 8 - len, len) + result + } + } + + def defaultByteArrayValue(dataType: DataType): Array[Byte] = toByteArray { + defaultValue(dataType) + } + + def defaultValue(dataType: DataType): Any = { + dataType match { + case BooleanType => + true + case ByteType => + Byte.MaxValue + case ShortType => + Short.MaxValue + case IntegerType | DateType => + Int.MaxValue + case LongType | TimestampType | _: DecimalType => + Long.MaxValue + case FloatType => + Float.MaxValue + case DoubleType => + Double.MaxValue + case StringType => + // we pad string to 8 bytes so it's equal to long + UTF8String.fromBytes(longToByte(Long.MaxValue)) + case other: Any => + throw new KyuubiSQLExtensionException(s"Unsupported z-order type: ${other.catalogString}") + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala 
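The toLong and longToByte conversions above XOR values with the sign bit so that signed inputs keep their natural order once the interleaved bytes are compared as unsigned data. A small standalone sketch of that sign-flip trick, with illustrative names:

object SignFlipSketch {
  private val Bit64Mask = 1L << 63

  // Map the signed ordering of Long onto the unsigned ordering.
  def flip(v: Long): Long = v ^ Bit64Mask

  def main(args: Array[String]): Unit = {
    val values = Seq(-10L, -1L, 0L, 1L, 10L)
    val sorted = values.sortWith((a, b) => java.lang.Long.compareUnsigned(flip(a), flip(b)) < 0)
    println(sorted) // List(-10, -1, 0, 1, 10): unsigned comparison preserves the signed order
  }
}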
b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala new file mode 100644 index 00000000000..81873476cc4 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala @@ -0,0 +1,289 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import scala.annotation.tailrec +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer + +import org.apache.spark.{ExecutorAllocationClient, MapOutputTrackerMaster, SparkContext, SparkEnv} +import org.apache.spark.internal.Logging +import org.apache.spark.resource.ResourceProfile +import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SortExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive._ +import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec +import org.apache.spark.sql.execution.command.DataWritingCommandExec +import org.apache.spark.sql.execution.datasources.WriteFilesExec +import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec +import org.apache.spark.sql.execution.exchange.{ENSURE_REQUIREMENTS, ShuffleExchangeExec} + +import org.apache.kyuubi.sql.{KyuubiSQLConf, WriteUtils} + +/** + * This rule assumes the final write stage has less cores requirement than previous, otherwise + * this rule would take no effect. + * + * It provide a feature: + * 1. Kill redundant executors before running final write stage + */ +case class FinalStageResourceManager(session: SparkSession) + extends Rule[SparkPlan] with FinalRebalanceStageHelper { + override def apply(plan: SparkPlan): SparkPlan = { + if (!conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED)) { + return plan + } + + if (!WriteUtils.isWrite(session, plan)) { + return plan + } + + val sc = session.sparkContext + val dra = sc.getConf.getBoolean("spark.dynamicAllocation.enabled", false) + val coresPerExecutor = sc.getConf.getInt("spark.executor.cores", 1) + val minExecutors = sc.getConf.getInt("spark.dynamicAllocation.minExecutors", 0) + val maxExecutors = sc.getConf.getInt("spark.dynamicAllocation.maxExecutors", Int.MaxValue) + val factor = conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_PARTITION_FACTOR) + val hasImprovementRoom = maxExecutors - 1 > minExecutors * factor + // Fast fail if: + // 1. DRA off + // 2. only work with yarn and k8s + // 3. 
maxExecutors is not bigger than minExecutors * factor + if (!dra || !sc.schedulerBackend.isInstanceOf[CoarseGrainedSchedulerBackend] || + !hasImprovementRoom) { + return plan + } + + val stageOpt = findFinalRebalanceStage(plan) + if (stageOpt.isEmpty) { + return plan + } + + // It's not safe to kill executors if this plan contains table cache. + // If the executor loses then the rdd would re-compute those partition. + if (hasTableCache(plan) && + conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE)) { + return plan + } + + // TODO: move this to query stage optimizer when updating Spark to 3.5.x + // Since we are in `prepareQueryStage`, the AQE shuffle read has not been applied. + // So we need to apply it by self. + val shuffleRead = queryStageOptimizerRules.foldLeft(stageOpt.get.asInstanceOf[SparkPlan]) { + case (latest, rule) => rule.apply(latest) + } + val (targetCores, stage) = shuffleRead match { + case AQEShuffleReadExec(stage: ShuffleQueryStageExec, partitionSpecs) => + (partitionSpecs.length, stage) + case stage: ShuffleQueryStageExec => + // we can still kill executors if no AQE shuffle read, e.g., `.repartition(2)` + (stage.shuffle.numPartitions, stage) + case _ => + // it should never happen in current Spark, but to be safe do nothing if happens + logWarning("BUG, Please report to Apache Kyuubi community") + return plan + } + // The condition whether inject custom resource profile: + // - target executors < active executors + // - active executors - target executors > min executors + val numActiveExecutors = sc.getExecutorIds().length + val targetExecutors = (math.ceil(targetCores.toFloat / coresPerExecutor) * factor).toInt + .max(1) + val hasBenefits = targetExecutors < numActiveExecutors && + (numActiveExecutors - targetExecutors) > minExecutors + logInfo(s"The snapshot of current executors view, " + + s"active executors: $numActiveExecutors, min executor: $minExecutors, " + + s"target executors: $targetExecutors, has benefits: $hasBenefits") + if (hasBenefits) { + val shuffleId = stage.plan.asInstanceOf[ShuffleExchangeExec].shuffleDependency.shuffleId + val numReduce = stage.plan.asInstanceOf[ShuffleExchangeExec].numPartitions + // Now, there is only a final rebalance stage waiting to execute and all tasks of previous + // stage are finished. Kill redundant existed executors eagerly so the tasks of final + // stage can be centralized scheduled. + killExecutors(sc, targetExecutors, shuffleId, numReduce) + } + + plan + } + + /** + * The priority of kill executors follow: + * 1. kill executor who is younger than other (The older the JIT works better) + * 2. 
kill executor who produces less shuffle data first + */ + private def findExecutorToKill( + sc: SparkContext, + targetExecutors: Int, + shuffleId: Int, + numReduce: Int): Seq[String] = { + val tracker = SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster] + val shuffleStatusOpt = tracker.shuffleStatuses.get(shuffleId) + if (shuffleStatusOpt.isEmpty) { + return Seq.empty + } + val shuffleStatus = shuffleStatusOpt.get + val executorToBlockSize = new mutable.HashMap[String, Long] + shuffleStatus.withMapStatuses { mapStatus => + mapStatus.foreach { status => + var i = 0 + var sum = 0L + while (i < numReduce) { + sum += status.getSizeForBlock(i) + i += 1 + } + executorToBlockSize.getOrElseUpdate(status.location.executorId, sum) + } + } + + val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend] + val executorsWithRegistrationTs = backend.getExecutorsWithRegistrationTs() + val existedExecutors = executorsWithRegistrationTs.keys.toSet + val expectedNumExecutorToKill = existedExecutors.size - targetExecutors + if (expectedNumExecutorToKill < 1) { + return Seq.empty + } + + val executorIdsToKill = new ArrayBuffer[String]() + // We first kill executor who does not hold shuffle block. It would happen because + // the last stage is running fast and finished in a short time. The existed executors are + // from previous stages that have not been killed by DRA, so we can not find it by tracking + // shuffle status. + // We should evict executors by their alive time first and retain all of executors which + // have better locality for shuffle block. + executorsWithRegistrationTs.toSeq.sortBy(_._2).foreach { case (id, _) => + if (executorIdsToKill.length < expectedNumExecutorToKill && + !executorToBlockSize.contains(id)) { + executorIdsToKill.append(id) + } + } + + // Evict the rest executors according to the shuffle block size + executorToBlockSize.toSeq.sortBy(_._2).foreach { case (id, _) => + if (executorIdsToKill.length < expectedNumExecutorToKill && existedExecutors.contains(id)) { + executorIdsToKill.append(id) + } + } + + executorIdsToKill.toSeq + } + + private def killExecutors( + sc: SparkContext, + targetExecutors: Int, + shuffleId: Int, + numReduce: Int): Unit = { + val executorAllocationClient = sc.schedulerBackend.asInstanceOf[ExecutorAllocationClient] + + val executorsToKill = + if (conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL)) { + executorAllocationClient.getExecutorIds() + } else { + findExecutorToKill(sc, targetExecutors, shuffleId, numReduce) + } + logInfo(s"Request to kill executors, total count ${executorsToKill.size}, " + + s"[${executorsToKill.mkString(", ")}].") + if (executorsToKill.isEmpty) { + return + } + + // Note, `SparkContext#killExecutors` does not allow with DRA enabled, + // see `https://github.com/apache/spark/pull/20604`. + // It may cause the status in `ExecutorAllocationManager` inconsistent with + // `CoarseGrainedSchedulerBackend` for a while. But it should be synchronous finally. + // + // We should adjust target num executors, otherwise `YarnAllocator` might re-request original + // target executors if DRA has not updated target executors yet. + // Note, DRA would re-adjust executors if there are more tasks to be executed, so we are safe. 
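The executor sizing in apply above reduces to a small piece of arithmetic: keep roughly ceil(targetCores / coresPerExecutor) * factor executors, and only kill when that actually frees executors beyond the dynamic allocation minimum. A dependency-free sketch; the parameter values are illustrative.

object FinalStageSizingSketch {
  def targetExecutors(targetCores: Int, coresPerExecutor: Int, factor: Double): Int =
    math.max(1, (math.ceil(targetCores.toDouble / coresPerExecutor) * factor).toInt)

  def hasBenefits(target: Int, active: Int, minExecutors: Int): Boolean =
    target < active && (active - target) > minExecutors

  def main(args: Array[String]): Unit = {
    val target = targetExecutors(targetCores = 20, coresPerExecutor = 4, factor = 1.2)
    println(target)                                             // 6
    println(hasBenefits(target, active = 50, minExecutors = 2)) // true, killing is worthwhile
  }
}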
+ // + // * We kill executor + // * YarnAllocator re-request target executors + // * DRA can not release executors since they are new added + // ----------------------------------------------------------------> timeline + executorAllocationClient.killExecutors( + executorIds = executorsToKill, + adjustTargetNumExecutors = true, + countFailures = false, + force = false) + + FinalStageResourceManager.getAdjustedTargetExecutors(sc) + .filter(_ < targetExecutors).foreach { adjustedExecutors => + val delta = targetExecutors - adjustedExecutors + logInfo(s"Target executors after kill ($adjustedExecutors) is lower than required " + + s"($targetExecutors). Requesting $delta additional executor(s).") + executorAllocationClient.requestExecutors(delta) + } + } + + @transient private val queryStageOptimizerRules: Seq[Rule[SparkPlan]] = Seq( + OptimizeSkewInRebalancePartitions, + CoalesceShufflePartitions(session), + OptimizeShuffleWithLocalRead) +} + +object FinalStageResourceManager extends Logging { + + private[sql] def getAdjustedTargetExecutors(sc: SparkContext): Option[Int] = { + sc.schedulerBackend match { + case schedulerBackend: CoarseGrainedSchedulerBackend => + try { + val field = classOf[CoarseGrainedSchedulerBackend] + .getDeclaredField("requestedTotalExecutorsPerResourceProfile") + field.setAccessible(true) + schedulerBackend.synchronized { + val requestedTotalExecutorsPerResourceProfile = + field.get(schedulerBackend).asInstanceOf[mutable.HashMap[ResourceProfile, Int]] + val defaultRp = sc.resourceProfileManager.defaultResourceProfile + requestedTotalExecutorsPerResourceProfile.get(defaultRp) + } + } catch { + case e: Exception => + logWarning("Failed to get requestedTotalExecutors of Default ResourceProfile", e) + None + } + case _ => None + } + } +} + +trait FinalRebalanceStageHelper extends AdaptiveSparkPlanHelper { + @tailrec + final protected def findFinalRebalanceStage(plan: SparkPlan): Option[ShuffleQueryStageExec] = { + plan match { + case write: DataWritingCommandExec => findFinalRebalanceStage(write.child) + case write: V2TableWriteExec => findFinalRebalanceStage(write.child) + case write: WriteFilesExec => findFinalRebalanceStage(write.child) + case p: ProjectExec => findFinalRebalanceStage(p.child) + case f: FilterExec => findFinalRebalanceStage(f.child) + case s: SortExec if !s.global => findFinalRebalanceStage(s.child) + case stage: ShuffleQueryStageExec + if stage.isMaterialized && stage.mapStats.isDefined && + stage.plan.isInstanceOf[ShuffleExchangeExec] && + stage.plan.asInstanceOf[ShuffleExchangeExec].shuffleOrigin != ENSURE_REQUIREMENTS => + Some(stage) + case _ => None + } + } + + final protected def hasTableCache(plan: SparkPlan): Boolean = { + find(plan) { + case _: InMemoryTableScanExec => true + case _ => false + }.isDefined + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala new file mode 100644 index 00000000000..64421d6bfab --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/InjectCustomResourceProfile.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
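findExecutorToKill above prefers to kill executors that hold no shuffle blocks for the final stage, ordered by registration time, and then falls back to the executors with the least shuffle output. A simplified, pure-Scala sketch of that ordering, using plain maps as stand-ins for the scheduler and map-output tracker state; all names are illustrative.

object KillPrioritySketch {
  def pickExecutorsToKill(
      registrationTs: Map[String, Long], // executorId -> registration timestamp
      shuffleBytes: Map[String, Long],   // executorId -> shuffle bytes written for the stage
      numToKill: Int): Seq[String] = {
    val withoutBlocks = registrationTs.toSeq
      .filter { case (id, _) => !shuffleBytes.contains(id) }
      .sortBy(_._2).map(_._1)
    val withBlocks = shuffleBytes.toSeq.sortBy(_._2).map(_._1)
    (withoutBlocks ++ withBlocks).distinct.take(math.max(0, numToKill))
  }

  def main(args: Array[String]): Unit = {
    val ts = Map("e1" -> 100L, "e2" -> 200L, "e3" -> 300L, "e4" -> 400L)
    val bytes = Map("e1" -> 50L, "e2" -> 10L)
    println(pickExecutorsToKill(ts, bytes, 3)) // List(e3, e4, e2)
  }
}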
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.rules.Rule +import org.apache.spark.sql.execution.{CustomResourceProfileExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive._ + +import org.apache.kyuubi.sql.{KyuubiSQLConf, WriteUtils} + +/** + * Inject custom resource profile for final write stage, so we can specify custom + * executor resource configs. + */ +case class InjectCustomResourceProfile(session: SparkSession) + extends Rule[SparkPlan] with FinalRebalanceStageHelper { + override def apply(plan: SparkPlan): SparkPlan = { + if (!conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED)) { + return plan + } + + if (!WriteUtils.isWrite(session, plan)) { + return plan + } + + val stage = findFinalRebalanceStage(plan) + if (stage.isEmpty) { + return plan + } + + // TODO: Ideally, We can call `CoarseGrainedSchedulerBackend.requestTotalExecutors` eagerly + // to reduce the task submit pending time, but it may lose task locality. + // + // By default, it would request executors when catch stage submit event. + injectCustomResourceProfile(plan, stage.get.id) + } + + private def injectCustomResourceProfile(plan: SparkPlan, id: Int): SparkPlan = { + plan match { + case stage: ShuffleQueryStageExec if stage.id == id => + CustomResourceProfileExec(stage) + case _ => plan.mapChildren(child => injectCustomResourceProfile(child, id)) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/PruneFileSourcePartitionHelper.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/PruneFileSourcePartitionHelper.scala new file mode 100644 index 00000000000..ce496eb474c --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/PruneFileSourcePartitionHelper.scala @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Expression, ExpressionSet, PredicateHelper, SubqueryExpression} +import org.apache.spark.sql.catalyst.plans.logical.LeafNode +import org.apache.spark.sql.execution.datasources.DataSourceStrategy +import org.apache.spark.sql.types.StructType + +trait PruneFileSourcePartitionHelper extends PredicateHelper { + + def getPartitionKeyFiltersAndDataFilters( + sparkSession: SparkSession, + relation: LeafNode, + partitionSchema: StructType, + filters: Seq[Expression], + output: Seq[AttributeReference]): (ExpressionSet, Seq[Expression]) = { + val normalizedFilters = DataSourceStrategy.normalizeExprs( + filters.filter(f => f.deterministic && !SubqueryExpression.hasSubquery(f)), + output) + val partitionColumns = + relation.resolve(partitionSchema, sparkSession.sessionState.analyzer.resolver) + val partitionSet = AttributeSet(partitionColumns) + val (partitionFilters, dataFilters) = normalizedFilters.partition(f => + f.references.subsetOf(partitionSet)) + val extraPartitionFilter = + dataFilters.flatMap(extractPredicatesWithinOutputSet(_, partitionSet)) + + (ExpressionSet(partitionFilters ++ extraPartitionFilter), dataFilters) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala new file mode 100644 index 00000000000..3698140fbd0 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/main/scala/org/apache/spark/sql/execution/CustomResourceProfileExec.scala @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution + +import org.apache.spark.network.util.{ByteUnit, JavaUtils} +import org.apache.spark.rdd.RDD +import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfileBuilder} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.expressions.{Attribute, SortOrder} +import org.apache.spark.sql.catalyst.plans.physical.Partitioning +import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} +import org.apache.spark.sql.vectorized.ColumnarBatch +import org.apache.spark.util.Utils + +import org.apache.kyuubi.sql.KyuubiSQLConf._ + +/** + * This node wraps the final executed plan and inject custom resource profile to the RDD. + * It assumes that, the produced RDD would create the `ResultStage` in `DAGScheduler`, + * so it makes resource isolation between previous and final stage. + * + * Note that, Spark does not support config `minExecutors` for each resource profile. 
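getPartitionKeyFiltersAndDataFilters above splits the pushed-down predicates into those that reference only partition columns (usable for partition pruning) and the remaining data filters. A simplified sketch of that split, with a plain case class standing in for Catalyst expressions:

object PartitionFilterSplitSketch {
  final case class Predicate(sql: String, references: Set[String])

  def split(
      filters: Seq[Predicate],
      partitionCols: Set[String]): (Seq[Predicate], Seq[Predicate]) =
    filters.partition(f => f.references.nonEmpty && f.references.subsetOf(partitionCols))

  def main(args: Array[String]): Unit = {
    val filters = Seq(
      Predicate("p_date = '2023-01-01'", Set("p_date")),
      Predicate("v > 10", Set("v")))
    val (partitionFilters, dataFilters) = split(filters, Set("p_date"))
    println(partitionFilters.map(_.sql)) // List(p_date = '2023-01-01')
    println(dataFilters.map(_.sql))      // List(v > 10)
  }
}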
+ * Which means, it would retain `minExecutors` for each resource profile. + * So, suggest set `spark.dynamicAllocation.minExecutors` to 0 if enable this feature. + */ +case class CustomResourceProfileExec(child: SparkPlan) extends UnaryExecNode { + override def output: Seq[Attribute] = child.output + override def outputPartitioning: Partitioning = child.outputPartitioning + override def outputOrdering: Seq[SortOrder] = child.outputOrdering + override def supportsColumnar: Boolean = child.supportsColumnar + override def supportsRowBased: Boolean = child.supportsRowBased + override protected def doCanonicalize(): SparkPlan = child.canonicalized + + private val executorCores = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_CORES).getOrElse( + sparkContext.getConf.getInt("spark.executor.cores", 1)) + private val executorMemory = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_MEMORY).getOrElse( + sparkContext.getConf.get("spark.executor.memory", "2G")) + private val executorMemoryOverhead = + conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_MEMORY_OVERHEAD) + .getOrElse(sparkContext.getConf.get("spark.executor.memoryOverhead", "1G")) + private val executorOffHeapMemory = conf.getConf(FINAL_WRITE_STAGE_EXECUTOR_OFF_HEAP_MEMORY) + + override lazy val metrics: Map[String, SQLMetric] = { + val base = Map( + "executorCores" -> SQLMetrics.createMetric(sparkContext, "executor cores"), + "executorMemory" -> SQLMetrics.createMetric(sparkContext, "executor memory (MiB)"), + "executorMemoryOverhead" -> SQLMetrics.createMetric( + sparkContext, + "executor memory overhead (MiB)")) + val addition = executorOffHeapMemory.map(_ => + "executorOffHeapMemory" -> + SQLMetrics.createMetric(sparkContext, "executor off heap memory (MiB)")).toMap + base ++ addition + } + + private def wrapResourceProfile[T](rdd: RDD[T]): RDD[T] = { + if (Utils.isTesting) { + // do nothing for local testing + return rdd + } + + metrics("executorCores") += executorCores + metrics("executorMemory") += JavaUtils.byteStringAs(executorMemory, ByteUnit.MiB) + metrics("executorMemoryOverhead") += JavaUtils.byteStringAs( + executorMemoryOverhead, + ByteUnit.MiB) + executorOffHeapMemory.foreach(m => + metrics("executorOffHeapMemory") += JavaUtils.byteStringAs(m, ByteUnit.MiB)) + + val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY) + SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.values.toSeq) + + val resourceProfileBuilder = new ResourceProfileBuilder() + val executorResourceRequests = new ExecutorResourceRequests() + executorResourceRequests.cores(executorCores) + executorResourceRequests.memory(executorMemory) + executorResourceRequests.memoryOverhead(executorMemoryOverhead) + executorOffHeapMemory.foreach(executorResourceRequests.offHeapMemory) + resourceProfileBuilder.require(executorResourceRequests) + rdd.withResources(resourceProfileBuilder.build()) + rdd + } + + override protected def doExecute(): RDD[InternalRow] = { + val rdd = child.execute() + wrapResourceProfile(rdd) + } + + override protected def doExecuteColumnar(): RDD[ColumnarBatch] = { + val rdd = child.executeColumnar() + wrapResourceProfile(rdd) + } + + override protected def withNewChildInternal(newChild: SparkPlan): SparkPlan = { + this.copy(child = newChild) + } +} diff --git a/extensions/spark/kyuubi-spark-connector-kudu/src/test/resources/log4j2-test.xml b/extensions/spark/kyuubi-extension-spark-3-4/src/test/resources/log4j2-test.xml similarity index 100% rename from 
extensions/spark/kyuubi-spark-connector-kudu/src/test/resources/log4j2-test.xml rename to extensions/spark/kyuubi-extension-spark-3-4/src/test/resources/log4j2-test.xml diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/DropIgnoreNonexistentSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/DropIgnoreNonexistentSuite.scala new file mode 100644 index 00000000000..bbc61fb4408 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/DropIgnoreNonexistentSuite.scala @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.plans.logical.{DropNamespace, NoopCommand} +import org.apache.spark.sql.execution.command._ + +import org.apache.kyuubi.sql.KyuubiSQLConf + +class DropIgnoreNonexistentSuite extends KyuubiSparkSQLExtensionTest { + + test("drop ignore nonexistent") { + withSQLConf(KyuubiSQLConf.DROP_IGNORE_NONEXISTENT.key -> "true") { + // drop nonexistent database + val df1 = sql("DROP DATABASE nonexistent_database") + assert(df1.queryExecution.analyzed.asInstanceOf[DropNamespace].ifExists == true) + + // drop nonexistent function + val df4 = sql("DROP FUNCTION nonexistent_function") + assert(df4.queryExecution.analyzed.isInstanceOf[NoopCommand]) + + // drop nonexistent PARTITION + withTable("test") { + sql("CREATE TABLE IF NOT EXISTS test(i int) PARTITIONED BY (p int)") + val df5 = sql("ALTER TABLE test DROP PARTITION (p = 1)") + assert(df5.queryExecution.analyzed + .asInstanceOf[AlterTableDropPartitionCommand].ifExists == true) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageConfigIsolationSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageConfigIsolationSuite.scala new file mode 100644 index 00000000000..96c8ae6e8b0 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageConfigIsolationSuite.scala @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
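A quick sketch of the behavior DropIgnoreNonexistentSuite verifies (illustrative; it assumes KyuubiSparkSQLExtension is installed in the session):

    // With the flag on, dropping a missing object is analyzed into a no-op instead of failing.
    spark.conf.set(KyuubiSQLConf.DROP_IGNORE_NONEXISTENT.key, "true")
    spark.sql("DROP FUNCTION nonexistent_function")   // analyzed to NoopCommand
    spark.sql("DROP DATABASE nonexistent_database")   // analyzed to DropNamespace with ifExists = true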
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.sql.execution.adaptive.{AQEShuffleReadExec, QueryStageExec} +import org.apache.spark.sql.internal.SQLConf + +import org.apache.kyuubi.sql.{FinalStageConfigIsolation, KyuubiSQLConf} + +class FinalStageConfigIsolationSuite extends KyuubiSparkSQLExtensionTest { + override protected def beforeAll(): Unit = { + super.beforeAll() + setupData() + } + + test("final stage config set reset check") { + withSQLConf( + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key -> "true", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION_WRITE_ONLY.key -> "false", + "spark.sql.finalStage.adaptive.coalescePartitions.minPartitionNum" -> "1", + "spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes" -> "100") { + // use loop to double check final stage config doesn't affect the sql query each other + (1 to 3).foreach { _ => + sql("SELECT COUNT(*) FROM VALUES(1) as t(c)").collect() + assert(spark.sessionState.conf.getConfString( + "spark.sql.previousStage.adaptive.coalescePartitions.minPartitionNum") === + FinalStageConfigIsolation.INTERNAL_UNSET_CONFIG_TAG) + assert(spark.sessionState.conf.getConfString( + "spark.sql.adaptive.coalescePartitions.minPartitionNum") === + "1") + assert(spark.sessionState.conf.getConfString( + "spark.sql.finalStage.adaptive.coalescePartitions.minPartitionNum") === + "1") + + // 64MB + assert(spark.sessionState.conf.getConfString( + "spark.sql.previousStage.adaptive.advisoryPartitionSizeInBytes") === + "67108864b") + assert(spark.sessionState.conf.getConfString( + "spark.sql.adaptive.advisoryPartitionSizeInBytes") === + "100") + assert(spark.sessionState.conf.getConfString( + "spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes") === + "100") + } + + sql("SET spark.sql.adaptive.advisoryPartitionSizeInBytes=1") + assert(spark.sessionState.conf.getConfString( + "spark.sql.adaptive.advisoryPartitionSizeInBytes") === + "1") + assert(!spark.sessionState.conf.contains( + "spark.sql.previousStage.adaptive.advisoryPartitionSizeInBytes")) + + sql("SET a=1") + assert(spark.sessionState.conf.getConfString("a") === "1") + + sql("RESET spark.sql.adaptive.coalescePartitions.minPartitionNum") + assert(!spark.sessionState.conf.contains( + "spark.sql.adaptive.coalescePartitions.minPartitionNum")) + assert(!spark.sessionState.conf.contains( + "spark.sql.previousStage.adaptive.coalescePartitions.minPartitionNum")) + + sql("RESET a") + assert(!spark.sessionState.conf.contains("a")) + } + } + + test("final stage config isolation") { + def checkPartitionNum( + sqlString: String, + previousPartitionNum: Int, + finalPartitionNum: Int): Unit = { + val df = sql(sqlString) + df.collect() + val shuffleReaders = collect(df.queryExecution.executedPlan) { + case customShuffleReader: AQEShuffleReadExec => customShuffleReader + } + assert(shuffleReaders.nonEmpty) + // reorder stage by stage id to ensure we get the right stage + val sortedShuffleReaders = shuffleReaders.sortWith { + case (s1, s2) => + s1.child.asInstanceOf[QueryStageExec].id < s2.child.asInstanceOf[QueryStageExec].id + } + if (sortedShuffleReaders.length > 1) { + 
assert(sortedShuffleReaders.head.partitionSpecs.length === previousPartitionNum) + } + assert(sortedShuffleReaders.last.partitionSpecs.length === finalPartitionNum) + assert(df.rdd.partitions.length === finalPartitionNum) + } + + withSQLConf( + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1", + SQLConf.SHUFFLE_PARTITIONS.key -> "3", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key -> "true", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION_WRITE_ONLY.key -> "false", + "spark.sql.adaptive.advisoryPartitionSizeInBytes" -> "1", + "spark.sql.adaptive.coalescePartitions.minPartitionSize" -> "1", + "spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes" -> "10000000") { + + // use loop to double check final stage config doesn't affect the sql query each other + (1 to 3).foreach { _ => + checkPartitionNum( + "SELECT c1, count(*) FROM t1 GROUP BY c1", + 1, + 1) + + checkPartitionNum( + "SELECT c2, count(*) FROM (SELECT c1, count(*) as c2 FROM t1 GROUP BY c1) GROUP BY c2", + 3, + 1) + + checkPartitionNum( + "SELECT t1.c1, count(*) FROM t1 JOIN t2 ON t1.c2 = t2.c2 GROUP BY t1.c1", + 3, + 1) + + checkPartitionNum( + """ + | SELECT /*+ REPARTITION */ + | t1.c1, count(*) FROM t1 + | JOIN t2 ON t1.c2 = t2.c2 + | JOIN t3 ON t1.c1 = t3.c1 + | GROUP BY t1.c1 + |""".stripMargin, + 3, + 1) + + // one shuffle reader + checkPartitionNum( + """ + | SELECT /*+ BROADCAST(t1) */ + | t1.c1, t2.c2 FROM t1 + | JOIN t2 ON t1.c2 = t2.c2 + | DISTRIBUTE BY c1 + |""".stripMargin, + 1, + 1) + + // test ReusedExchange + checkPartitionNum( + """ + |SELECT /*+ REPARTITION */ t0.c2 FROM ( + |SELECT t1.c1, (count(*) + c1) as c2 FROM t1 GROUP BY t1.c1 + |) t0 JOIN ( + |SELECT t1.c1, (count(*) + c1) as c2 FROM t1 GROUP BY t1.c1 + |) t1 ON t0.c2 = t1.c2 + |""".stripMargin, + 3, + 1) + + // one shuffle reader + checkPartitionNum( + """ + |SELECT t0.c1 FROM ( + |SELECT t1.c1 FROM t1 GROUP BY t1.c1 + |) t0 JOIN ( + |SELECT t1.c1 FROM t1 GROUP BY t1.c1 + |) t1 ON t0.c1 = t1.c1 + |""".stripMargin, + 1, + 1) + } + } + } + + test("final stage config isolation write only") { + withSQLConf( + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key -> "true", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION_WRITE_ONLY.key -> "true", + "spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes" -> "7") { + sql("set spark.sql.adaptive.advisoryPartitionSizeInBytes=5") + sql("SELECT * FROM t1").count() + assert(spark.conf.getOption("spark.sql.adaptive.advisoryPartitionSizeInBytes") + .contains("5")) + + withTable("tmp") { + sql("CREATE TABLE t1 USING PARQUET SELECT /*+ repartition */ 1 AS c1, 'a' AS c2") + assert(spark.conf.getOption("spark.sql.adaptive.advisoryPartitionSizeInBytes") + .contains("7")) + } + + sql("SELECT * FROM t1").count() + assert(spark.conf.getOption("spark.sql.adaptive.advisoryPartitionSizeInBytes") + .contains("5")) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala new file mode 100644 index 00000000000..4b9991ef6f2 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/FinalStageResourceManagerSuite.scala @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
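The naming convention FinalStageConfigIsolationSuite relies on, as a hedged sketch (the keys shown are the Spark AQE keys the tests use; the rewrite itself is done by FinalStageConfigIsolation):

    // Configure a value that should apply only to the final (write) stage:
    spark.conf.set("spark.sql.finalStage.adaptive.advisoryPartitionSizeInBytes", "100")
    // When the final stage is planned, the finalStage value is expected to be copied onto the
    // effective key ("spark.sql.adaptive.advisoryPartitionSizeInBytes" -> "100"), while the
    // prior value is remembered under
    // "spark.sql.previousStage.adaptive.advisoryPartitionSizeInBytes" (or an internal "unset"
    // tag if the key had no explicit value), so the original setting can be restored for the
    // next query.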
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.scalatest.time.{Minutes, Span} + +import org.apache.kyuubi.sql.KyuubiSQLConf +import org.apache.kyuubi.tags.SparkLocalClusterTest + +@SparkLocalClusterTest +class FinalStageResourceManagerSuite extends KyuubiSparkSQLExtensionTest { + + override def sparkConf(): SparkConf = { + // It is difficult to run spark in local-cluster mode when spark.testing is set. + sys.props.remove("spark.testing") + + super.sparkConf().set("spark.master", "local-cluster[3, 1, 1024]") + .set("spark.dynamicAllocation.enabled", "true") + .set("spark.dynamicAllocation.initialExecutors", "3") + .set("spark.dynamicAllocation.minExecutors", "1") + .set("spark.dynamicAllocation.shuffleTracking.enabled", "true") + .set(KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key, "true") + .set(KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED.key, "true") + } + + test("[KYUUBI #5136][Bug] Final Stage hangs forever") { + // Prerequisite to reproduce the bug: + // 1. Dynamic allocation is enabled. + // 2. Dynamic allocation min executors is 1. + // 3. target executors < active executors. + // 4. No active executor is left after FinalStageResourceManager killed executors. + // This is possible because FinalStageResourceManager retained executors may already be + // requested to be killed but not died yet. + // 5. Final Stage required executors is 1. + withSQLConf( + (KyuubiSQLConf.FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL.key, "true")) { + withTable("final_stage") { + eventually(timeout(Span(10, Minutes))) { + sql( + "CREATE TABLE final_stage AS SELECT id, count(*) as num FROM (SELECT 0 id) GROUP BY id") + } + assert(FinalStageResourceManager.getAdjustedTargetExecutors(spark.sparkContext).get == 1) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala new file mode 100644 index 00000000000..b0767b18708 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InjectResourceProfileSuite.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
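For reference, a hedged sketch of how the adjusted executor target can be inspected after such a write (same API the test above uses; the expected value depends on the final stage's task count):

    // With FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL enabled and a single-task
    // final stage, the manager is expected to keep only one executor for the write stage.
    assert(FinalStageResourceManager.getAdjustedTargetExecutors(spark.sparkContext).get == 1)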
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent} +import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate + +import org.apache.kyuubi.sql.KyuubiSQLConf + +class InjectResourceProfileSuite extends KyuubiSparkSQLExtensionTest { + private def checkCustomResourceProfile(sqlString: String, exists: Boolean): Unit = { + @volatile var lastEvent: SparkListenerSQLAdaptiveExecutionUpdate = null + val listener = new SparkListener { + override def onOtherEvent(event: SparkListenerEvent): Unit = { + event match { + case e: SparkListenerSQLAdaptiveExecutionUpdate => lastEvent = e + case _ => + } + } + } + + spark.sparkContext.addSparkListener(listener) + try { + sql(sqlString).collect() + spark.sparkContext.listenerBus.waitUntilEmpty() + assert(lastEvent != null) + var current = lastEvent.sparkPlanInfo + var shouldStop = false + while (!shouldStop) { + if (current.nodeName != "CustomResourceProfile") { + if (current.children.isEmpty) { + assert(!exists) + shouldStop = true + } else { + current = current.children.head + } + } else { + assert(exists) + shouldStop = true + } + } + } finally { + spark.sparkContext.removeSparkListener(listener) + } + } + + test("Inject resource profile") { + withTable("t") { + withSQLConf( + "spark.sql.adaptive.forceApply" -> "true", + KyuubiSQLConf.FINAL_STAGE_CONFIG_ISOLATION.key -> "true", + KyuubiSQLConf.FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED.key -> "true") { + + sql("CREATE TABLE t (c1 int, c2 string) USING PARQUET") + + checkCustomResourceProfile("INSERT INTO TABLE t VALUES(1, 'a')", false) + checkCustomResourceProfile("SELECT 1", false) + checkCustomResourceProfile( + "INSERT INTO TABLE t SELECT /*+ rebalance */ * FROM VALUES(1, 'a')", + true) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuite.scala new file mode 100644 index 00000000000..f0d38465734 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuite.scala @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
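The while-loop in checkCustomResourceProfile above only follows the first child at each level; a small recursive helper that searches the whole SparkPlanInfo tree looks like the sketch below (SparkPlanInfo is the class carried by the listener event, and "CustomResourceProfile" is the node name reported by CustomResourceProfileExec):

    import org.apache.spark.sql.execution.SparkPlanInfo

    // Returns true if any node in the plan-info tree has the given name.
    def planContains(info: SparkPlanInfo, name: String): Boolean =
      info.nodeName == name || info.children.exists(planContains(_, name))

    // e.g. planContains(lastEvent.sparkPlanInfo, "CustomResourceProfile")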
+ */ +package org.apache.spark.sql + +class InsertShuffleNodeBeforeJoinSuite extends InsertShuffleNodeBeforeJoinSuiteBase diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuiteBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuiteBase.scala new file mode 100644 index 00000000000..c657dee49f3 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/InsertShuffleNodeBeforeJoinSuiteBase.scala @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.apache.spark.sql.execution.exchange.{ENSURE_REQUIREMENTS, ShuffleExchangeLike} +import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} + +import org.apache.kyuubi.sql.KyuubiSQLConf + +trait InsertShuffleNodeBeforeJoinSuiteBase extends KyuubiSparkSQLExtensionTest { + override protected def beforeAll(): Unit = { + super.beforeAll() + setupData() + } + + override def sparkConf(): SparkConf = { + super.sparkConf() + .set( + StaticSQLConf.SPARK_SESSION_EXTENSIONS.key, + "org.apache.kyuubi.sql.KyuubiSparkSQLCommonExtension") + } + + test("force shuffle before join") { + def checkShuffleNodeNum(sqlString: String, num: Int): Unit = { + var expectedResult: Seq[Row] = Seq.empty + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + expectedResult = sql(sqlString).collect() + } + val df = sql(sqlString) + checkAnswer(df, expectedResult) + assert( + collect(df.queryExecution.executedPlan) { + case shuffle: ShuffleExchangeLike if shuffle.shuffleOrigin == ENSURE_REQUIREMENTS => + shuffle + }.size == num) + } + + withSQLConf( + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + KyuubiSQLConf.FORCE_SHUFFLE_BEFORE_JOIN.key -> "true") { + Seq("SHUFFLE_HASH", "MERGE").foreach { joinHint => + // positive case + checkShuffleNodeNum( + s""" + |SELECT /*+ $joinHint(t2, t3) */ t1.c1, t1.c2, t2.c1, t3.c1 from t1 + | JOIN t2 ON t1.c1 = t2.c1 + | JOIN t3 ON t1.c1 = t3.c1 + | """.stripMargin, + 4) + + // negative case + checkShuffleNodeNum( + s""" + |SELECT /*+ $joinHint(t2, t3) */ t1.c1, t1.c2, t2.c1, t3.c1 from t1 + | JOIN t2 ON t1.c1 = t2.c1 + | JOIN t3 ON t1.c2 = t3.c2 + | """.stripMargin, + 4) + } + + checkShuffleNodeNum( + """ + |SELECT t1.c1, t2.c1, t3.c2 from t1 + | JOIN t2 ON t1.c1 = t2.c1 + | JOIN ( + | SELECT c2, count(*) FROM t1 GROUP BY c2 + | ) t3 ON t1.c1 = t3.c2 + | """.stripMargin, + 5) + + checkShuffleNodeNum( + """ + |SELECT t1.c1, t2.c1, t3.c1 from t1 + | JOIN t2 ON t1.c1 = t2.c1 + | JOIN ( + | SELECT c1, count(*) FROM t1 GROUP BY c1 + | ) t3 ON t1.c1 = t3.c1 + | """.stripMargin, + 5) + } + } +} diff --git 
a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala new file mode 100644 index 00000000000..dd9ffbf169e --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.sql + +import org.apache.hadoop.hive.conf.HiveConf.ConfVars +import org.apache.spark.SparkConf +import org.apache.spark.sql.execution.QueryExecution +import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper +import org.apache.spark.sql.execution.command.{DataWritingCommand, DataWritingCommandExec} +import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} +import org.apache.spark.sql.test.SQLTestData.TestData +import org.apache.spark.sql.test.SQLTestUtils +import org.apache.spark.sql.util.QueryExecutionListener +import org.apache.spark.util.Utils + +import org.apache.kyuubi.sql.KyuubiSQLConf + +trait KyuubiSparkSQLExtensionTest extends QueryTest + with SQLTestUtils + with AdaptiveSparkPlanHelper { + sys.props.put("spark.testing", "1") + + private var _spark: Option[SparkSession] = None + protected def spark: SparkSession = _spark.getOrElse { + throw new RuntimeException("test spark session don't initial before using it.") + } + + override protected def beforeAll(): Unit = { + if (_spark.isEmpty) { + _spark = Option(SparkSession.builder() + .master("local[1]") + .config(sparkConf) + .enableHiveSupport() + .getOrCreate()) + } + super.beforeAll() + } + + override protected def afterAll(): Unit = { + super.afterAll() + cleanupData() + _spark.foreach(_.stop) + } + + protected def setupData(): Unit = { + val self = spark + import self.implicits._ + spark.sparkContext.parallelize( + (1 to 100).map(i => TestData(i, i.toString)), + 10) + .toDF("c1", "c2").createOrReplaceTempView("t1") + spark.sparkContext.parallelize( + (1 to 10).map(i => TestData(i, i.toString)), + 5) + .toDF("c1", "c2").createOrReplaceTempView("t2") + spark.sparkContext.parallelize( + (1 to 50).map(i => TestData(i, i.toString)), + 2) + .toDF("c1", "c2").createOrReplaceTempView("t3") + } + + private def cleanupData(): Unit = { + spark.sql("DROP VIEW IF EXISTS t1") + spark.sql("DROP VIEW IF EXISTS t2") + spark.sql("DROP VIEW IF EXISTS t3") + } + + def sparkConf(): SparkConf = { + val basePath = Utils.createTempDir() + "/" + getClass.getCanonicalName + val metastorePath = basePath + "/metastore_db" + val warehousePath = basePath + "/warehouse" + new SparkConf() + .set( + StaticSQLConf.SPARK_SESSION_EXTENSIONS.key, + "org.apache.kyuubi.sql.KyuubiSparkSQLExtension") + 
.set(KyuubiSQLConf.SQL_CLASSIFICATION_ENABLED.key, "true") + .set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "true") + .set("spark.hadoop.hive.exec.dynamic.partition.mode", "nonstrict") + .set("spark.hadoop.hive.metastore.client.capability.check", "false") + .set( + ConfVars.METASTORECONNECTURLKEY.varname, + s"jdbc:derby:;databaseName=$metastorePath;create=true") + .set(StaticSQLConf.WAREHOUSE_PATH, warehousePath) + .set("spark.ui.enabled", "false") + } + + def withListener(sqlString: String)(callback: DataWritingCommand => Unit): Unit = { + withListener(sql(sqlString))(callback) + } + + def withListener(df: => DataFrame)(callback: DataWritingCommand => Unit): Unit = { + val listener = new QueryExecutionListener { + override def onFailure(f: String, qe: QueryExecution, e: Exception): Unit = {} + + override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = { + qe.executedPlan match { + case write: DataWritingCommandExec => callback(write.cmd) + case _ => + } + } + } + spark.listenerManager.register(listener) + try { + df.collect() + sparkContext.listenerBus.waitUntilEmpty() + } finally { + spark.listenerManager.unregister(listener) + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/RebalanceBeforeWritingSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/RebalanceBeforeWritingSuite.scala new file mode 100644 index 00000000000..1d9630f4937 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/RebalanceBeforeWritingSuite.scala @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
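A usage sketch for the withListener helper defined above (this mirrors how the write-path suites that follow consume it; the table name is a placeholder and RebalancePartitions comes from org.apache.spark.sql.catalyst.plans.logical):

    withListener("INSERT INTO TABLE tmp1 SELECT * FROM VALUES (1), (2) AS t(c1)") { write =>
      // `write` is the DataWritingCommand captured from the executed plan; inspect its query here.
      assert(write.collect { case r: RebalancePartitions => r }.nonEmpty)
    }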
+ */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.expressions.Attribute +import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, RebalancePartitions, Sort} +import org.apache.spark.sql.execution.command.DataWritingCommand +import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand +import org.apache.spark.sql.hive.HiveUtils +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable + +import org.apache.kyuubi.sql.KyuubiSQLConf + +class RebalanceBeforeWritingSuite extends KyuubiSparkSQLExtensionTest { + + test("check rebalance exists") { + def check(df: => DataFrame, expectedRebalanceNum: Int = 1): Unit = { + withSQLConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE.key -> "true") { + withListener(df) { write => + assert(write.collect { + case r: RebalancePartitions => r + }.size == expectedRebalanceNum) + } + } + withSQLConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE.key -> "false") { + withListener(df) { write => + assert(write.collect { + case r: RebalancePartitions => r + }.isEmpty) + } + } + } + + // It's better to set config explicitly in case of we change the default value. + withSQLConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE.key -> "true") { + Seq("USING PARQUET", "").foreach { storage => + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage PARTITIONED BY (c2 string)") + check(sql("INSERT INTO TABLE tmp1 PARTITION(c2='a') " + + "SELECT * FROM VALUES(1),(2) AS t(c1)")) + } + + withTable("tmp1", "tmp2") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage PARTITIONED BY (c2 string)") + sql(s"CREATE TABLE tmp2 (c1 int) $storage PARTITIONED BY (c2 string)") + check( + sql( + """FROM VALUES(1),(2) + |INSERT INTO TABLE tmp1 PARTITION(c2='a') SELECT * + |INSERT INTO TABLE tmp2 PARTITION(c2='a') SELECT * + |""".stripMargin), + 2) + } + + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage") + check(sql("INSERT INTO TABLE tmp1 SELECT * FROM VALUES(1),(2),(3) AS t(c1)")) + } + + withTable("tmp1", "tmp2") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage") + sql(s"CREATE TABLE tmp2 (c1 int) $storage") + check( + sql( + """FROM VALUES(1),(2),(3) + |INSERT INTO TABLE tmp1 SELECT * + |INSERT INTO TABLE tmp2 SELECT * + |""".stripMargin), + 2) + } + + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 $storage AS SELECT * FROM VALUES(1),(2),(3) AS t(c1)") + } + + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 $storage PARTITIONED BY(c2) AS " + + s"SELECT * FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2)") + } + } + } + } + + test("check rebalance does not exists") { + def check(df: DataFrame): Unit = { + withListener(df) { write => + assert(write.collect { + case r: RebalancePartitions => r + }.isEmpty) + } + } + + withSQLConf( + KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE.key -> "true", + KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE.key -> "true") { + // test no write command + check(sql("SELECT * FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2)")) + check(sql("SELECT count(*) FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2)")) + + // test not supported plan + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) PARTITIONED BY (c2 string)") + check(sql("INSERT INTO TABLE tmp1 PARTITION(c2) " + + "SELECT /*+ repartition(10) */ * FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2)")) + check(sql("INSERT INTO TABLE tmp1 PARTITION(c2) " + + "SELECT * FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2) ORDER BY c1")) + check(sql("INSERT INTO TABLE tmp1 PARTITION(c2) " + + "SELECT * FROM VALUES(1, 'a'),(2, 'b') AS 
t(c1, c2) LIMIT 10")) + } + } + + withSQLConf(KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE.key -> "false") { + Seq("USING PARQUET", "").foreach { storage => + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage PARTITIONED BY (c2 string)") + check(sql("INSERT INTO TABLE tmp1 PARTITION(c2) " + + "SELECT * FROM VALUES(1, 'a'),(2, 'b') AS t(c1, c2)")) + } + + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage") + check(sql("INSERT INTO TABLE tmp1 SELECT * FROM VALUES(1),(2),(3) AS t(c1)")) + } + } + } + } + + test("test dynamic partition write") { + def checkRepartitionExpression(sqlString: String): Unit = { + withListener(sqlString) { write => + assert(write.isInstanceOf[InsertIntoHiveTable]) + assert(write.collect { + case r: RebalancePartitions if r.partitionExpressions.size == 1 => + assert(r.partitionExpressions.head.asInstanceOf[Attribute].name === "c2") + r + }.size == 1) + } + } + + withSQLConf( + KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE.key -> "true", + KyuubiSQLConf.DYNAMIC_PARTITION_INSERTION_REPARTITION_NUM.key -> "2", + KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE.key -> "true") { + Seq("USING PARQUET", "").foreach { storage => + withTable("tmp1") { + sql(s"CREATE TABLE tmp1 (c1 int) $storage PARTITIONED BY (c2 string)") + checkRepartitionExpression("INSERT INTO TABLE tmp1 SELECT 1 as c1, 'a' as c2 ") + } + + withTable("tmp1") { + checkRepartitionExpression( + "CREATE TABLE tmp1 PARTITIONED BY(C2) SELECT 1 as c1, 'a' as c2") + } + } + } + } + + test("OptimizedCreateHiveTableAsSelectCommand") { + withSQLConf( + HiveUtils.CONVERT_METASTORE_PARQUET.key -> "true", + HiveUtils.CONVERT_METASTORE_CTAS.key -> "true", + KyuubiSQLConf.INSERT_REPARTITION_BEFORE_WRITE_IF_NO_SHUFFLE.key -> "true") { + withTable("t") { + withListener("CREATE TABLE t STORED AS parquet AS SELECT 1 as a") { write => + assert(write.isInstanceOf[InsertIntoHadoopFsRelationCommand]) + assert(write.collect { + case _: RebalancePartitions => true + }.size == 1) + } + } + } + } + + test("Infer rebalance and sorder orders") { + def checkShuffleAndSort(dataWritingCommand: LogicalPlan, sSize: Int, rSize: Int): Unit = { + assert(dataWritingCommand.isInstanceOf[DataWritingCommand]) + val plan = dataWritingCommand.asInstanceOf[DataWritingCommand].query + assert(plan.collect { + case s: Sort => s + }.size == sSize) + assert(plan.collect { + case r: RebalancePartitions if r.partitionExpressions.size == rSize => r + }.nonEmpty || rSize == 0) + } + + withView("v") { + withTable("t", "input1", "input2") { + withSQLConf(KyuubiSQLConf.INFER_REBALANCE_AND_SORT_ORDERS.key -> "true") { + sql(s"CREATE TABLE t (c1 int, c2 long) USING PARQUET PARTITIONED BY (p string)") + sql(s"CREATE TABLE input1 USING PARQUET AS SELECT * FROM VALUES(1,2),(1,3)") + sql(s"CREATE TABLE input2 USING PARQUET AS SELECT * FROM VALUES(1,3),(1,3)") + sql(s"CREATE VIEW v as SELECT col1, count(*) as col2 FROM input1 GROUP BY col1") + + val df0 = sql( + s""" + |INSERT INTO TABLE t PARTITION(p='a') + |SELECT /*+ broadcast(input2) */ input1.col1, input2.col1 + |FROM input1 + |JOIN input2 + |ON input1.col1 = input2.col1 + |""".stripMargin) + checkShuffleAndSort(df0.queryExecution.analyzed, 1, 1) + + val df1 = sql( + s""" + |INSERT INTO TABLE t PARTITION(p='a') + |SELECT /*+ broadcast(input2) */ input1.col1, input1.col2 + |FROM input1 + |LEFT JOIN input2 + |ON input1.col1 = input2.col1 and input1.col2 = input2.col2 + |""".stripMargin) + checkShuffleAndSort(df1.queryExecution.analyzed, 1, 2) + + val df2 = sql( + s""" + 
|INSERT INTO TABLE t PARTITION(p='a') + |SELECT col1 as c1, count(*) as c2 + |FROM input1 + |GROUP BY col1 + |HAVING count(*) > 0 + |""".stripMargin) + checkShuffleAndSort(df2.queryExecution.analyzed, 1, 1) + + // dynamic partition + val df3 = sql( + s""" + |INSERT INTO TABLE t PARTITION(p) + |SELECT /*+ broadcast(input2) */ input1.col1, input1.col2, input1.col2 + |FROM input1 + |JOIN input2 + |ON input1.col1 = input2.col1 + |""".stripMargin) + checkShuffleAndSort(df3.queryExecution.analyzed, 0, 1) + + // non-deterministic + val df4 = sql( + s""" + |INSERT INTO TABLE t PARTITION(p='a') + |SELECT col1 + rand(), count(*) as c2 + |FROM input1 + |GROUP BY col1 + |""".stripMargin) + checkShuffleAndSort(df4.queryExecution.analyzed, 0, 0) + + // view + val df5 = sql( + s""" + |INSERT INTO TABLE t PARTITION(p='a') + |SELECT * FROM v + |""".stripMargin) + checkShuffleAndSort(df5.queryExecution.analyzed, 1, 1) + } + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuite.scala new file mode 100644 index 00000000000..957089340ca --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuite.scala @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +class WatchDogSuite extends WatchDogSuiteBase {} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala new file mode 100644 index 00000000000..a202e813c5e --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala @@ -0,0 +1,601 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import java.io.File + +import scala.collection.JavaConverters._ + +import org.apache.commons.io.FileUtils +import org.apache.spark.sql.catalyst.plans.logical.{GlobalLimit, LogicalPlan} + +import org.apache.kyuubi.sql.KyuubiSQLConf +import org.apache.kyuubi.sql.watchdog.{MaxFileSizeExceedException, MaxPartitionExceedException} + +trait WatchDogSuiteBase extends KyuubiSparkSQLExtensionTest { + override protected def beforeAll(): Unit = { + super.beforeAll() + setupData() + } + + case class LimitAndExpected(limit: Int, expected: Int) + + val limitAndExpecteds = List(LimitAndExpected(1, 1), LimitAndExpected(11, 10)) + + private def checkMaxPartition: Unit = { + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_PARTITIONS.key -> "100") { + checkAnswer(sql("SELECT count(distinct(p)) FROM test"), Row(10) :: Nil) + } + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_PARTITIONS.key -> "5") { + sql("SELECT * FROM test where p=1").queryExecution.sparkPlan + + sql(s"SELECT * FROM test WHERE p in (${Range(0, 5).toList.mkString(",")})") + .queryExecution.sparkPlan + + intercept[MaxPartitionExceedException]( + sql("SELECT * FROM test where p != 1").queryExecution.sparkPlan) + + intercept[MaxPartitionExceedException]( + sql("SELECT * FROM test").queryExecution.sparkPlan) + + intercept[MaxPartitionExceedException](sql( + s"SELECT * FROM test WHERE p in (${Range(0, 6).toList.mkString(",")})") + .queryExecution.sparkPlan) + } + } + + test("watchdog with scan maxPartitions -- hive") { + Seq("textfile", "parquet").foreach { format => + withTable("test", "temp") { + sql( + s""" + |CREATE TABLE test(i int) + |PARTITIONED BY (p int) + |STORED AS $format""".stripMargin) + spark.range(0, 10, 1).selectExpr("id as col") + .createOrReplaceTempView("temp") + + for (part <- Range(0, 10)) { + sql( + s""" + |INSERT OVERWRITE TABLE test PARTITION (p='$part') + |select col from temp""".stripMargin) + } + checkMaxPartition + } + } + } + + test("watchdog with scan maxPartitions -- data source") { + withTempDir { dir => + withTempView("test") { + spark.range(10).selectExpr("id", "id as p") + .write + .partitionBy("p") + .mode("overwrite") + .save(dir.getCanonicalPath) + spark.read.load(dir.getCanonicalPath).createOrReplaceTempView("test") + checkMaxPartition + } + } + } + + test("test watchdog: simple SELECT STATEMENT") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + + List("", "ORDER BY c1", "ORDER BY c2").foreach { sort => + List("", " DISTINCT").foreach { distinct => + assert(sql( + s""" + |SELECT $distinct * + |FROM t1 + |$sort + |""".stripMargin).queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + + limitAndExpecteds.foreach { case LimitAndExpected(limit, expected) => + List("", "ORDER BY c1", "ORDER BY c2").foreach { sort => + List("", "DISTINCT").foreach { distinct => + assert(sql( + s""" + |SELECT $distinct * + |FROM t1 + |$sort + |LIMIT $limit + |""".stripMargin).queryExecution.optimizedPlan.maxRows.contains(expected)) + } + } + } + } + } + + test("test watchdog: SELECT ... 
WITH AGGREGATE STATEMENT ") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + + assert(!sql("SELECT count(*) FROM t1") + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + + val sorts = List("", "ORDER BY cnt", "ORDER BY c1", "ORDER BY cnt, c1", "ORDER BY c1, cnt") + val havingConditions = List("", "HAVING cnt > 1") + + havingConditions.foreach { having => + sorts.foreach { sort => + assert(sql( + s""" + |SELECT c1, COUNT(*) as cnt + |FROM t1 + |GROUP BY c1 + |$having + |$sort + |""".stripMargin).queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + + limitAndExpecteds.foreach { case LimitAndExpected(limit, expected) => + havingConditions.foreach { having => + sorts.foreach { sort => + assert(sql( + s""" + |SELECT c1, COUNT(*) as cnt + |FROM t1 + |GROUP BY c1 + |$having + |$sort + |LIMIT $limit + |""".stripMargin).queryExecution.optimizedPlan.maxRows.contains(expected)) + } + } + } + } + } + + test("test watchdog: SELECT with CTE forceMaxOutputRows") { + // simple CTE + val q1 = + """ + |WITH t2 AS ( + | SELECT * FROM t1 + |) + |""".stripMargin + + // nested CTE + val q2 = + """ + |WITH + | t AS (SELECT * FROM t1), + | t2 AS ( + | WITH t3 AS (SELECT * FROM t1) + | SELECT * FROM t3 + | ) + |""".stripMargin + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + + val sorts = List("", "ORDER BY c1", "ORDER BY c2") + + sorts.foreach { sort => + Seq(q1, q2).foreach { withQuery => + assert(sql( + s""" + |$withQuery + |SELECT * FROM t2 + |$sort + |""".stripMargin).queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + + limitAndExpecteds.foreach { case LimitAndExpected(limit, expected) => + sorts.foreach { sort => + Seq(q1, q2).foreach { withQuery => + assert(sql( + s""" + |$withQuery + |SELECT * FROM t2 + |$sort + |LIMIT $limit + |""".stripMargin).queryExecution.optimizedPlan.maxRows.contains(expected)) + } + } + } + } + } + + test("test watchdog: SELECT AGGREGATE WITH CTE forceMaxOutputRows") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + + assert(!sql( + """ + |WITH custom_cte AS ( + |SELECT * FROM t1 + |) + | + |SELECT COUNT(*) + |FROM custom_cte + |""".stripMargin).queryExecution + .analyzed.isInstanceOf[GlobalLimit]) + + val sorts = List("", "ORDER BY cnt", "ORDER BY c1", "ORDER BY cnt, c1", "ORDER BY c1, cnt") + val havingConditions = List("", "HAVING cnt > 1") + + havingConditions.foreach { having => + sorts.foreach { sort => + assert(sql( + s""" + |WITH custom_cte AS ( + |SELECT * FROM t1 + |) + | + |SELECT c1, COUNT(*) as cnt + |FROM custom_cte + |GROUP BY c1 + |$having + |$sort + |""".stripMargin).queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + + limitAndExpecteds.foreach { case LimitAndExpected(limit, expected) => + havingConditions.foreach { having => + sorts.foreach { sort => + assert(sql( + s""" + |WITH custom_cte AS ( + |SELECT * FROM t1 + |) + | + |SELECT c1, COUNT(*) as cnt + |FROM custom_cte + |GROUP BY c1 + |$having + |$sort + |LIMIT $limit + |""".stripMargin).queryExecution.optimizedPlan.maxRows.contains(expected)) + } + } + } + } + } + + test("test watchdog: UNION Statement for forceMaxOutputRows") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + + List("", "ALL").foreach { x => + assert(sql( + s""" + |SELECT c1, c2 FROM t1 + |UNION $x + |SELECT c1, c2 FROM t2 + |UNION $x + |SELECT c1, c2 FROM t3 + |""".stripMargin) + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + + val sorts = List("", "ORDER BY cnt", 
"ORDER BY c1", "ORDER BY cnt, c1", "ORDER BY c1, cnt") + val havingConditions = List("", "HAVING cnt > 1") + + List("", "ALL").foreach { x => + havingConditions.foreach { having => + sorts.foreach { sort => + assert(sql( + s""" + |SELECT c1, count(c2) as cnt + |FROM t1 + |GROUP BY c1 + |$having + |UNION $x + |SELECT c1, COUNT(c2) as cnt + |FROM t2 + |GROUP BY c1 + |$having + |UNION $x + |SELECT c1, COUNT(c2) as cnt + |FROM t3 + |GROUP BY c1 + |$having + |$sort + |""".stripMargin) + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + } + + limitAndExpecteds.foreach { case LimitAndExpected(limit, expected) => + assert(sql( + s""" + |SELECT c1, c2 FROM t1 + |UNION + |SELECT c1, c2 FROM t2 + |UNION + |SELECT c1, c2 FROM t3 + |LIMIT $limit + |""".stripMargin) + .queryExecution.optimizedPlan.maxRows.contains(expected)) + } + } + } + + test("test watchdog: Select View Statement for forceMaxOutputRows") { + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "3") { + withTable("tmp_table", "tmp_union") { + withView("tmp_view", "tmp_view2") { + sql(s"create table tmp_table (a int, b int)") + sql(s"insert into tmp_table values (1,10),(2,20),(3,30),(4,40),(5,50)") + sql(s"create table tmp_union (a int, b int)") + sql(s"insert into tmp_union values (6,60),(7,70),(8,80),(9,90),(10,100)") + sql(s"create view tmp_view2 as select * from tmp_union") + assert(!sql( + s""" + |CREATE VIEW tmp_view + |as + |SELECT * FROM + |tmp_table + |""".stripMargin) + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + + assert(sql( + s""" + |SELECT * FROM + |tmp_view + |""".stripMargin) + .queryExecution.optimizedPlan.maxRows.contains(3)) + + assert(sql( + s""" + |SELECT * FROM + |tmp_view + |limit 11 + |""".stripMargin) + .queryExecution.optimizedPlan.maxRows.contains(3)) + + assert(sql( + s""" + |SELECT * FROM + |(select * from tmp_view + |UNION + |select * from tmp_view2) + |ORDER BY a + |DESC + |""".stripMargin) + .collect().head.get(0) === 10) + } + } + } + } + + test("test watchdog: Insert Statement for forceMaxOutputRows") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + withTable("tmp_table", "tmp_insert") { + spark.sql(s"create table tmp_table (a int, b int)") + spark.sql(s"insert into tmp_table values (1,10),(2,20),(3,30),(4,40),(5,50)") + val multiInsertTableName1: String = "tmp_tbl1" + val multiInsertTableName2: String = "tmp_tbl2" + sql(s"drop table if exists $multiInsertTableName1") + sql(s"drop table if exists $multiInsertTableName2") + sql(s"create table $multiInsertTableName1 like tmp_table") + sql(s"create table $multiInsertTableName2 like tmp_table") + assert(!sql( + s""" + |FROM tmp_table + |insert into $multiInsertTableName1 select * limit 2 + |insert into $multiInsertTableName2 select * + |""".stripMargin) + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + } + + test("test watchdog: Distribute by for forceMaxOutputRows") { + + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "10") { + withTable("tmp_table") { + spark.sql(s"create table tmp_table (a int, b int)") + spark.sql(s"insert into tmp_table values (1,10),(2,20),(3,30),(4,40),(5,50)") + assert(sql( + s""" + |SELECT * + |FROM tmp_table + |DISTRIBUTE BY a + |""".stripMargin) + .queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + } + } + } + + test("test watchdog: Subquery for forceMaxOutputRows") { + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "1") { + withTable("tmp_table1") { + sql("CREATE TABLE 
spark_catalog.`default`.tmp_table1(KEY INT, VALUE STRING) USING PARQUET") + sql("INSERT INTO TABLE spark_catalog.`default`.tmp_table1 " + + "VALUES (1, 'aa'),(2,'bb'),(3, 'cc'),(4,'aa'),(5,'cc'),(6, 'aa')") + assert( + sql("select * from tmp_table1").queryExecution.optimizedPlan.isInstanceOf[GlobalLimit]) + val testSqlText = + """ + |select count(*) + |from tmp_table1 + |where tmp_table1.key in ( + |select distinct tmp_table1.key + |from tmp_table1 + |where tmp_table1.value = "aa" + |) + |""".stripMargin + val plan = sql(testSqlText).queryExecution.optimizedPlan + assert(!findGlobalLimit(plan)) + checkAnswer(sql(testSqlText), Row(3) :: Nil) + } + + def findGlobalLimit(plan: LogicalPlan): Boolean = plan match { + case _: GlobalLimit => true + case p if p.children.isEmpty => false + case p => p.children.exists(findGlobalLimit) + } + + } + } + + test("test watchdog: Join for forceMaxOutputRows") { + withSQLConf(KyuubiSQLConf.WATCHDOG_FORCED_MAXOUTPUTROWS.key -> "1") { + withTable("tmp_table1", "tmp_table2") { + sql("CREATE TABLE spark_catalog.`default`.tmp_table1(KEY INT, VALUE STRING) USING PARQUET") + sql("INSERT INTO TABLE spark_catalog.`default`.tmp_table1 " + + "VALUES (1, 'aa'),(2,'bb'),(3, 'cc'),(4,'aa'),(5,'cc'),(6, 'aa')") + sql("CREATE TABLE spark_catalog.`default`.tmp_table2(KEY INT, VALUE STRING) USING PARQUET") + sql("INSERT INTO TABLE spark_catalog.`default`.tmp_table2 " + + "VALUES (1, 'aa'),(2,'bb'),(3, 'cc'),(4,'aa'),(5,'cc'),(6, 'aa')") + val testSqlText = + """ + |select a.*,b.* + |from tmp_table1 a + |join + |tmp_table2 b + |on a.KEY = b.KEY + |""".stripMargin + val plan = sql(testSqlText).queryExecution.optimizedPlan + assert(findGlobalLimit(plan)) + } + + def findGlobalLimit(plan: LogicalPlan): Boolean = plan match { + case _: GlobalLimit => true + case p if p.children.isEmpty => false + case p => p.children.exists(findGlobalLimit) + } + } + } + + private def checkMaxFileSize(tableSize: Long, nonPartTableSize: Long): Unit = { + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> tableSize.toString) { + checkAnswer(sql("SELECT count(distinct(p)) FROM test"), Row(10) :: Nil) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> (tableSize / 2).toString) { + sql("SELECT * FROM test where p=1").queryExecution.sparkPlan + + sql(s"SELECT * FROM test WHERE p in (${Range(0, 3).toList.mkString(",")})") + .queryExecution.sparkPlan + + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test where p != 1").queryExecution.sparkPlan) + + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test").queryExecution.sparkPlan) + + intercept[MaxFileSizeExceedException](sql( + s"SELECT * FROM test WHERE p in (${Range(0, 6).toList.mkString(",")})") + .queryExecution.sparkPlan) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> nonPartTableSize.toString) { + checkAnswer(sql("SELECT count(*) FROM test_non_part"), Row(10000) :: Nil) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> (nonPartTableSize - 1).toString) { + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test_non_part").queryExecution.sparkPlan) + } + } + + test("watchdog with scan maxFileSize -- hive") { + Seq(false).foreach { convertMetastoreParquet => + withTable("test", "test_non_part", "temp") { + spark.range(10000).selectExpr("id as col") + .createOrReplaceTempView("temp") + + // partitioned table + sql( + s""" + |CREATE TABLE test(i int) + |PARTITIONED BY (p int) + |STORED AS parquet""".stripMargin) + for (part <- Range(0, 10)) { + sql( + s""" + |INSERT 
OVERWRITE TABLE test PARTITION (p='$part') + |select col from temp""".stripMargin) + } + + val tablePath = new File(spark.sessionState.catalog.externalCatalog + .getTable("default", "test").location) + val tableSize = FileUtils.listFiles(tablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(tableSize > 0) + + // non-partitioned table + sql( + s""" + |CREATE TABLE test_non_part(i int) + |STORED AS parquet""".stripMargin) + sql( + s""" + |INSERT OVERWRITE TABLE test_non_part + |select col from temp""".stripMargin) + sql("ANALYZE TABLE test_non_part COMPUTE STATISTICS") + + val nonPartTablePath = new File(spark.sessionState.catalog.externalCatalog + .getTable("default", "test_non_part").location) + val nonPartTableSize = FileUtils.listFiles(nonPartTablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(nonPartTableSize > 0) + + // check + withSQLConf("spark.sql.hive.convertMetastoreParquet" -> convertMetastoreParquet.toString) { + checkMaxFileSize(tableSize, nonPartTableSize) + } + } + } + } + + test("watchdog with scan maxFileSize -- data source") { + withTempDir { dir => + withTempView("test", "test_non_part") { + // partitioned table + val tablePath = new File(dir, "test") + spark.range(10).selectExpr("id", "id as p") + .write + .partitionBy("p") + .mode("overwrite") + .parquet(tablePath.getCanonicalPath) + spark.read.load(tablePath.getCanonicalPath).createOrReplaceTempView("test") + + val tableSize = FileUtils.listFiles(tablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(tableSize > 0) + + // non-partitioned table + val nonPartTablePath = new File(dir, "test_non_part") + spark.range(10000).selectExpr("id", "id as p") + .write + .mode("overwrite") + .parquet(nonPartTablePath.getCanonicalPath) + spark.read.load(nonPartTablePath.getCanonicalPath).createOrReplaceTempView("test_non_part") + + val nonPartTableSize = FileUtils.listFiles(nonPartTablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(tableSize > 0) + + // check + checkMaxFileSize(tableSize, nonPartTableSize) + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderCoreBenchmark.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderCoreBenchmark.scala new file mode 100644 index 00000000000..9b1614fce31 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderCoreBenchmark.scala @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
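A condensed sketch of the watchdog guards exercised above (config keys come from KyuubiSQLConf; the table name is a placeholder). The checks fire at planning time, so touching `queryExecution.sparkPlan` is enough to trigger them:

    // Cap the total file size a single scan may read (here, 32 MiB).
    spark.conf.set(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key, (32L * 1024 * 1024).toString)
    intercept[MaxFileSizeExceedException] {
      spark.sql("SELECT * FROM big_table").queryExecution.sparkPlan
    }
    // WATCHDOG_FORCED_MAXOUTPUTROWS behaves differently: instead of failing the query,
    // it rewrites eligible statements by appending a GlobalLimit, as the earlier tests assert.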
+ */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.apache.spark.benchmark.Benchmark +import org.apache.spark.sql.benchmark.KyuubiBenchmarkBase +import org.apache.spark.sql.internal.StaticSQLConf + +import org.apache.kyuubi.sql.zorder.ZorderBytesUtils + +/** + * Benchmark to measure the performance of the zorder core. + * + * {{{ + *   RUN_BENCHMARK=1 ./build/mvn clean test \ + *   -pl extensions/spark/kyuubi-extension-spark-3-4 -am \ + *   -Pspark-3.4,kyuubi-extension-spark-3-4 \ + *   -Dtest=none -DwildcardSuites=org.apache.spark.sql.ZorderCoreBenchmark + * }}} + */ +class ZorderCoreBenchmark extends KyuubiSparkSQLExtensionTest with KyuubiBenchmarkBase { +  private val runBenchmark = sys.env.contains("RUN_BENCHMARK") +  private val numRows = 1 * 1000 * 1000 + +  private def randomInt(numColumns: Int): Seq[Array[Any]] = { +    (1 to numRows).map { l => +      val arr = new Array[Any](numColumns) +      (0 until numColumns).foreach(col => arr(col) = l) +      arr +    } +  } + +  private def randomLong(numColumns: Int): Seq[Array[Any]] = { +    (1 to numRows).map { l => +      val arr = new Array[Any](numColumns) +      (0 until numColumns).foreach(col => arr(col) = l.toLong) +      arr +    } +  } + +  private def interleaveMultiByteArrayBenchmark(): Unit = { +    val benchmark = +      new Benchmark(s"$numRows rows zorder core benchmark", numRows, output = output) +    benchmark.addCase("2 int columns benchmark", 3) { _ => +      randomInt(2).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.addCase("3 int columns benchmark", 3) { _ => +      randomInt(3).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.addCase("4 int columns benchmark", 3) { _ => +      randomInt(4).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.addCase("2 long columns benchmark", 3) { _ => +      randomLong(2).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.addCase("3 long columns benchmark", 3) { _ => +      randomLong(3).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.addCase("4 long columns benchmark", 3) { _ => +      randomLong(4).foreach(ZorderBytesUtils.interleaveBits) +    } + +    benchmark.run() +  } + +  private def paddingTo8ByteBenchmark(): Unit = { +    val iterations = 10 * 1000 * 1000 + +    val b2 = Array('a'.toByte, 'b'.toByte) +    val benchmark = +      new Benchmark(s"$iterations iterations paddingTo8Byte benchmark", iterations, output = output) +    benchmark.addCase("2 length benchmark", 3) { _ => +      (1 to iterations).foreach(_ => ZorderBytesUtils.paddingTo8Byte(b2)) +    } + +    val b16 = Array.tabulate(16) { i => i.toByte } +    benchmark.addCase("16 length benchmark", 3) { _ => +      (1 to iterations).foreach(_ => ZorderBytesUtils.paddingTo8Byte(b16)) +    } + +    benchmark.run() +  } + +  test("zorder core benchmark") { +    assume(runBenchmark) + +    withHeader { +      interleaveMultiByteArrayBenchmark() +      paddingTo8ByteBenchmark() +    } +  } + +  override def sparkConf(): SparkConf = { +    super.sparkConf().remove(StaticSQLConf.SPARK_SESSION_EXTENSIONS.key) +  } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuite.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuite.scala new file mode 100644 index 00000000000..c2fa1619707 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuite.scala @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.parser.ParserInterface +import org.apache.spark.sql.catalyst.plans.logical.{RebalancePartitions, Sort} +import org.apache.spark.sql.internal.SQLConf + +import org.apache.kyuubi.sql.{KyuubiSQLConf, SparkKyuubiSparkSQLParser} +import org.apache.kyuubi.sql.zorder.Zorder + +trait ZorderSuiteSpark extends ZorderSuiteBase { + + test("Add rebalance before zorder") { + Seq("true" -> false, "false" -> true).foreach { case (useOriginalOrdering, zorder) => + withSQLConf( + KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED.key -> "false", + KyuubiSQLConf.REBALANCE_BEFORE_ZORDER.key -> "true", + KyuubiSQLConf.REBALANCE_ZORDER_COLUMNS_ENABLED.key -> "true", + KyuubiSQLConf.ZORDER_USING_ORIGINAL_ORDERING_ENABLED.key -> useOriginalOrdering) { + withTable("t") { + sql( + """ + |CREATE TABLE t (c1 int, c2 string) PARTITIONED BY (d string) + | TBLPROPERTIES ( + |'kyuubi.zorder.enabled'= 'true', + |'kyuubi.zorder.cols'= 'c1,C2') + |""".stripMargin) + val p = sql("INSERT INTO TABLE t PARTITION(d='a') SELECT * FROM VALUES(1,'a')") + .queryExecution.analyzed + assert(p.collect { + case sort: Sort + if !sort.global && + ((sort.order.exists(_.child.isInstanceOf[Zorder]) && zorder) || + (!sort.order.exists(_.child.isInstanceOf[Zorder]) && !zorder)) => sort + }.size == 1) + assert(p.collect { + case rebalance: RebalancePartitions + if rebalance.references.map(_.name).exists(_.equals("c1")) => rebalance + }.size == 1) + + val p2 = sql("INSERT INTO TABLE t PARTITION(d) SELECT * FROM VALUES(1,'a','b')") + .queryExecution.analyzed + assert(p2.collect { + case sort: Sort + if (!sort.global && Seq("c1", "c2", "d").forall(x => + sort.references.map(_.name).exists(_.equals(x)))) && + ((sort.order.exists(_.child.isInstanceOf[Zorder]) && zorder) || + (!sort.order.exists(_.child.isInstanceOf[Zorder]) && !zorder)) => sort + }.size == 1) + assert(p2.collect { + case rebalance: RebalancePartitions + if Seq("c1", "c2", "d").forall(x => + rebalance.references.map(_.name).exists(_.equals(x))) => rebalance + }.size == 1) + } + } + } + } + + test("Two phase rebalance before Z-Order") { + withSQLConf( + SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> + "org.apache.spark.sql.catalyst.optimizer.CollapseRepartition", + KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED.key -> "false", + KyuubiSQLConf.REBALANCE_BEFORE_ZORDER.key -> "true", + KyuubiSQLConf.TWO_PHASE_REBALANCE_BEFORE_ZORDER.key -> "true", + KyuubiSQLConf.REBALANCE_ZORDER_COLUMNS_ENABLED.key -> "true") { + withTable("t") { + sql( + """ + |CREATE TABLE t (c1 int) PARTITIONED BY (d string) + | TBLPROPERTIES ( + |'kyuubi.zorder.enabled'= 'true', + |'kyuubi.zorder.cols'= 'c1') + |""".stripMargin) + val p = sql("INSERT INTO TABLE t PARTITION(d) SELECT * FROM VALUES(1,'a')") + val rebalance = p.queryExecution.optimizedPlan.innerChildren + .flatMap(_.collect { case r: RebalancePartitions => r }) + assert(rebalance.size == 2) + 
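// Two-phase rebalance: the first RebalancePartitions collected from the optimized plan +        // should reference both the dynamic partition column `d` and the zorder column `c1`, +        // while the second one should reference the partition column `d` only. +        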
assert(rebalance.head.partitionExpressions.flatMap(_.references.map(_.name)) + .contains("d")) + assert(rebalance.head.partitionExpressions.flatMap(_.references.map(_.name)) + .contains("c1")) + + assert(rebalance(1).partitionExpressions.flatMap(_.references.map(_.name)) + .contains("d")) + assert(!rebalance(1).partitionExpressions.flatMap(_.references.map(_.name)) + .contains("c1")) + } + } + } +} + +trait ParserSuite { self: ZorderSuiteBase => + override def createParser: ParserInterface = { + new SparkKyuubiSparkSQLParser(spark.sessionState.sqlParser) + } +} + +class ZorderWithCodegenEnabledSuite + extends ZorderWithCodegenEnabledSuiteBase + with ZorderSuiteSpark + with ParserSuite {} +class ZorderWithCodegenDisabledSuite + extends ZorderWithCodegenDisabledSuiteBase + with ZorderSuiteSpark + with ParserSuite {} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala new file mode 100644 index 00000000000..2d3eec95722 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala @@ -0,0 +1,768 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier} +import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar} +import org.apache.spark.sql.catalyst.expressions.{Alias, Ascending, AttributeReference, EqualTo, Expression, ExpressionEvalHelper, Literal, NullsLast, SortOrder} +import org.apache.spark.sql.catalyst.parser.{ParseException, ParserInterface} +import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, OneRowRelation, Project, Sort} +import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand +import org.apache.spark.sql.functions._ +import org.apache.spark.sql.hive.execution.InsertIntoHiveTable +import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} +import org.apache.spark.sql.types._ + +import org.apache.kyuubi.sql.{KyuubiSQLConf, KyuubiSQLExtensionException} +import org.apache.kyuubi.sql.zorder.{OptimizeZorderCommandBase, OptimizeZorderStatement, Zorder, ZorderBytesUtils} + +trait ZorderSuiteBase extends KyuubiSparkSQLExtensionTest with ExpressionEvalHelper { + override def sparkConf(): SparkConf = { + super.sparkConf() + .set( + StaticSQLConf.SPARK_SESSION_EXTENSIONS.key, + "org.apache.kyuubi.sql.KyuubiSparkSQLCommonExtension") + } + + test("optimize unpartitioned table") { + withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { + withTable("up") { + sql(s"DROP TABLE IF EXISTS up") + + val target = Seq( + Seq(0, 0), + Seq(1, 0), + Seq(0, 1), + Seq(1, 1), + Seq(2, 0), + Seq(3, 0), + Seq(2, 1), + Seq(3, 1), + Seq(0, 2), + Seq(1, 2), + Seq(0, 3), + Seq(1, 3), + Seq(2, 2), + Seq(3, 2), + Seq(2, 3), + Seq(3, 3)) + sql(s"CREATE TABLE up (c1 INT, c2 INT, c3 INT)") + sql(s"INSERT INTO TABLE up VALUES" + + "(0,0,2),(0,1,2),(0,2,1),(0,3,3)," + + "(1,0,4),(1,1,2),(1,2,1),(1,3,3)," + + "(2,0,2),(2,1,1),(2,2,5),(2,3,5)," + + "(3,0,3),(3,1,4),(3,2,9),(3,3,0)") + + val e = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE up WHERE c1 > 1 ZORDER BY c1, c2") + } + assert(e.getMessage == "Filters are only supported for partitioned table") + + sql("OPTIMIZE up ZORDER BY c1, c2") + val res = sql("SELECT c1, c2 FROM up").collect() + + assert(res.length == 16) + + for (i <- target.indices) { + val t = target(i) + val r = res(i) + assert(t(0) == r.getInt(0)) + assert(t(1) == r.getInt(1)) + } + } + } + } + + test("optimize partitioned table") { + withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { + withTable("p") { + sql("DROP TABLE IF EXISTS p") + + val target = Seq( + Seq(0, 0), + Seq(1, 0), + Seq(0, 1), + Seq(1, 1), + Seq(2, 0), + Seq(3, 0), + Seq(2, 1), + Seq(3, 1), + Seq(0, 2), + Seq(1, 2), + Seq(0, 3), + Seq(1, 3), + Seq(2, 2), + Seq(3, 2), + Seq(2, 3), + Seq(3, 3)) + + sql(s"CREATE TABLE p (c1 INT, c2 INT, c3 INT) PARTITIONED BY (id INT)") + sql(s"ALTER TABLE p ADD PARTITION (id = 1)") + sql(s"ALTER TABLE p ADD PARTITION (id = 2)") + sql(s"INSERT INTO TABLE p PARTITION (id = 1) VALUES" + + "(0,0,2),(0,1,2),(0,2,1),(0,3,3)," + + "(1,0,4),(1,1,2),(1,2,1),(1,3,3)," + + "(2,0,2),(2,1,1),(2,2,5),(2,3,5)," + + "(3,0,3),(3,1,4),(3,2,9),(3,3,0)") + sql(s"INSERT INTO TABLE p PARTITION (id = 2) VALUES" + + "(0,0,2),(0,1,2),(0,2,1),(0,3,3)," + + "(1,0,4),(1,1,2),(1,2,1),(1,3,3)," + + "(2,0,2),(2,1,1),(2,2,5),(2,3,5)," + + "(3,0,3),(3,1,4),(3,2,9),(3,3,0)") + + sql(s"OPTIMIZE p ZORDER BY c1, c2") + + val res1 = sql(s"SELECT c1, c2 FROM p WHERE id = 1").collect() + val res2 = sql(s"SELECT c1, c2 FROM 
p WHERE id = 2").collect() + + assert(res1.length == 16) + assert(res2.length == 16) + + for (i <- target.indices) { + val t = target(i) + val r1 = res1(i) + assert(t(0) == r1.getInt(0)) + assert(t(1) == r1.getInt(1)) + + val r2 = res2(i) + assert(t(0) == r2.getInt(0)) + assert(t(1) == r2.getInt(1)) + } + } + } + } + + test("optimize partitioned table with filters") { + withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { + withTable("p") { + sql("DROP TABLE IF EXISTS p") + + val target1 = Seq( + Seq(0, 0), + Seq(1, 0), + Seq(0, 1), + Seq(1, 1), + Seq(2, 0), + Seq(3, 0), + Seq(2, 1), + Seq(3, 1), + Seq(0, 2), + Seq(1, 2), + Seq(0, 3), + Seq(1, 3), + Seq(2, 2), + Seq(3, 2), + Seq(2, 3), + Seq(3, 3)) + val target2 = Seq( + Seq(0, 0), + Seq(0, 1), + Seq(0, 2), + Seq(0, 3), + Seq(1, 0), + Seq(1, 1), + Seq(1, 2), + Seq(1, 3), + Seq(2, 0), + Seq(2, 1), + Seq(2, 2), + Seq(2, 3), + Seq(3, 0), + Seq(3, 1), + Seq(3, 2), + Seq(3, 3)) + sql(s"CREATE TABLE p (c1 INT, c2 INT, c3 INT) PARTITIONED BY (id INT)") + sql(s"ALTER TABLE p ADD PARTITION (id = 1)") + sql(s"ALTER TABLE p ADD PARTITION (id = 2)") + sql(s"INSERT INTO TABLE p PARTITION (id = 1) VALUES" + + "(0,0,2),(0,1,2),(0,2,1),(0,3,3)," + + "(1,0,4),(1,1,2),(1,2,1),(1,3,3)," + + "(2,0,2),(2,1,1),(2,2,5),(2,3,5)," + + "(3,0,3),(3,1,4),(3,2,9),(3,3,0)") + sql(s"INSERT INTO TABLE p PARTITION (id = 2) VALUES" + + "(0,0,2),(0,1,2),(0,2,1),(0,3,3)," + + "(1,0,4),(1,1,2),(1,2,1),(1,3,3)," + + "(2,0,2),(2,1,1),(2,2,5),(2,3,5)," + + "(3,0,3),(3,1,4),(3,2,9),(3,3,0)") + + val e = intercept[KyuubiSQLExtensionException]( + sql(s"OPTIMIZE p WHERE id = 1 AND c1 > 1 ZORDER BY c1, c2")) + assert(e.getMessage == "Only partition column filters are allowed") + + sql(s"OPTIMIZE p WHERE id = 1 ZORDER BY c1, c2") + + val res1 = sql(s"SELECT c1, c2 FROM p WHERE id = 1").collect() + val res2 = sql(s"SELECT c1, c2 FROM p WHERE id = 2").collect() + + assert(res1.length == 16) + assert(res2.length == 16) + + for (i <- target1.indices) { + val t1 = target1(i) + val r1 = res1(i) + assert(t1(0) == r1.getInt(0)) + assert(t1(1) == r1.getInt(1)) + + val t2 = target2(i) + val r2 = res2(i) + assert(t2(0) == r2.getInt(0)) + assert(t2(1) == r2.getInt(1)) + } + } + } + } + + test("optimize zorder with datasource table") { + // TODO remove this if we support datasource table + withTable("t") { + sql("CREATE TABLE t (c1 int, c2 int) USING PARQUET") + val msg = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE t ZORDER BY c1, c2") + }.getMessage + assert(msg.contains("only support hive table")) + } + } + + private def checkZorderTable( + enabled: Boolean, + cols: String, + planHasRepartition: Boolean, + resHasSort: Boolean): Unit = { + def checkSort(plan: LogicalPlan): Unit = { + assert(plan.isInstanceOf[Sort] === resHasSort) + plan match { + case sort: Sort => + val colArr = cols.split(",") + val refs = + if (colArr.length == 1) { + sort.order.head + .child.asInstanceOf[AttributeReference] :: Nil + } else { + sort.order.head + .child.asInstanceOf[Zorder].children.map(_.references.head) + } + assert(refs.size === colArr.size) + refs.zip(colArr).foreach { case (ref, col) => + assert(ref.name === col.trim) + } + case _ => + } + } + + val repartition = + if (planHasRepartition) { + "/*+ repartition */" + } else { + "" + } + withSQLConf("spark.sql.shuffle.partitions" -> "1") { + // hive + withSQLConf("spark.sql.hive.convertMetastoreParquet" -> "false") { + withTable("zorder_t1", "zorder_t2_true", "zorder_t2_false") { + sql( + s""" + |CREATE TABLE zorder_t1 (c1 int, c2 string, c3 
long, c4 double) STORED AS PARQUET + |TBLPROPERTIES ( + | 'kyuubi.zorder.enabled' = '$enabled', + | 'kyuubi.zorder.cols' = '$cols') + |""".stripMargin) + val df1 = sql(s""" + |INSERT INTO TABLE zorder_t1 + |SELECT $repartition * FROM VALUES(1,'a',2,4D),(2,'b',3,6D) + |""".stripMargin) + assert(df1.queryExecution.analyzed.isInstanceOf[InsertIntoHiveTable]) + checkSort(df1.queryExecution.analyzed.children.head) + + Seq("true", "false").foreach { optimized => + withSQLConf( + "spark.sql.hive.convertMetastoreCtas" -> optimized, + "spark.sql.hive.convertMetastoreParquet" -> optimized) { + + withListener( + s""" + |CREATE TABLE zorder_t2_$optimized STORED AS PARQUET + |TBLPROPERTIES ( + | 'kyuubi.zorder.enabled' = '$enabled', + | 'kyuubi.zorder.cols' = '$cols') + | + |SELECT $repartition * FROM + |VALUES(1,'a',2,4D),(2,'b',3,6D) AS t(c1 ,c2 , c3, c4) + |""".stripMargin) { write => + if (optimized.toBoolean) { + assert(write.isInstanceOf[InsertIntoHadoopFsRelationCommand]) + } else { + assert(write.isInstanceOf[InsertIntoHiveTable]) + } + checkSort(write.query) + } + } + } + } + } + + // datasource + withTable("zorder_t3", "zorder_t4") { + sql( + s""" + |CREATE TABLE zorder_t3 (c1 int, c2 string, c3 long, c4 double) USING PARQUET + |TBLPROPERTIES ( + | 'kyuubi.zorder.enabled' = '$enabled', + | 'kyuubi.zorder.cols' = '$cols') + |""".stripMargin) + val df1 = sql(s""" + |INSERT INTO TABLE zorder_t3 + |SELECT $repartition * FROM VALUES(1,'a',2,4D),(2,'b',3,6D) + |""".stripMargin) + assert(df1.queryExecution.analyzed.isInstanceOf[InsertIntoHadoopFsRelationCommand]) + checkSort(df1.queryExecution.analyzed.children.head) + + withListener( + s""" + |CREATE TABLE zorder_t4 USING PARQUET + |TBLPROPERTIES ( + | 'kyuubi.zorder.enabled' = '$enabled', + | 'kyuubi.zorder.cols' = '$cols') + | + |SELECT $repartition * FROM + |VALUES(1,'a',2,4D),(2,'b',3,6D) AS t(c1 ,c2 , c3, c4) + |""".stripMargin) { write => + assert(write.isInstanceOf[InsertIntoHadoopFsRelationCommand]) + checkSort(write.query) + } + } + } + } + + test("Support insert zorder by table properties") { + withSQLConf(KyuubiSQLConf.INSERT_ZORDER_BEFORE_WRITING.key -> "false") { + checkZorderTable(true, "c1", false, false) + checkZorderTable(false, "c1", false, false) + } + withSQLConf(KyuubiSQLConf.INSERT_ZORDER_BEFORE_WRITING.key -> "true") { + checkZorderTable(true, "", false, false) + checkZorderTable(true, "c5", false, false) + checkZorderTable(true, "c1,c5", false, false) + checkZorderTable(false, "c3", false, false) + checkZorderTable(true, "c3", true, false) + checkZorderTable(true, "c3", false, true) + checkZorderTable(true, "c2,c4", false, true) + checkZorderTable(true, "c4, c2, c1, c3", false, true) + } + } + + test("zorder: check unsupported data type") { + def checkZorderPlan(zorder: Expression): Unit = { + val msg = intercept[AnalysisException] { + val plan = Project(Seq(Alias(zorder, "c")()), OneRowRelation()) + spark.sessionState.analyzer.checkAnalysis(plan) + }.getMessage + // before Spark 3.2.0 the null type catalog string is null, after Spark 3.2.0 it's void + // see https://github.com/apache/spark/pull/33437 + assert(msg.contains("Unsupported z-order type:") && + (msg.contains("null") || msg.contains("void"))) + } + + checkZorderPlan(Zorder(Seq(Literal(null, NullType)))) + checkZorderPlan(Zorder(Seq(Literal(1, IntegerType), Literal(null, NullType)))) + } + + test("zorder: check supported data type") { + val children = Seq( + Literal.create(false, BooleanType), + Literal.create(null, BooleanType), + Literal.create(1.toByte, 
ByteType), + Literal.create(null, ByteType), + Literal.create(1.toShort, ShortType), + Literal.create(null, ShortType), + Literal.create(1, IntegerType), + Literal.create(null, IntegerType), + Literal.create(1L, LongType), + Literal.create(null, LongType), + Literal.create(1f, FloatType), + Literal.create(null, FloatType), + Literal.create(1d, DoubleType), + Literal.create(null, DoubleType), + Literal.create("1", StringType), + Literal.create(null, StringType), + Literal.create(1L, TimestampType), + Literal.create(null, TimestampType), + Literal.create(1, DateType), + Literal.create(null, DateType), + Literal.create(BigDecimal(1, 1), DecimalType(1, 1)), + Literal.create(null, DecimalType(1, 1))) + val zorder = Zorder(children) + val plan = Project(Seq(Alias(zorder, "c")()), OneRowRelation()) + spark.sessionState.analyzer.checkAnalysis(plan) + assert(zorder.foldable) + +// // scalastyle:off +// val resultGen = org.apache.commons.codec.binary.Hex.encodeHex( +// zorder.eval(InternalRow.fromSeq(children)).asInstanceOf[Array[Byte]], false) +// resultGen.grouped(2).zipWithIndex.foreach { case (char, i) => +// print("0x" + char(0) + char(1) + ", ") +// if ((i + 1) % 10 == 0) { +// println() +// } +// } +// // scalastyle:on + + val expected = Array( + 0xFB, 0xEA, 0xAA, 0xBA, 0xAE, 0xAB, 0xAA, 0xEA, 0xBA, 0xAE, 0xAB, 0xAA, 0xEA, 0xBA, 0xA6, + 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, + 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xBA, 0xBB, 0xAA, 0xAA, 0xAA, + 0xBA, 0xAA, 0xBA, 0xAA, 0xBA, 0xAA, 0xBA, 0xAA, 0xBA, 0xAA, 0xBA, 0xAA, 0x9A, 0xAA, 0xAA, + 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xEA, + 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, + 0xAA, 0xAA, 0xBE, 0xAA, 0xAA, 0x8A, 0xBA, 0xAA, 0x2A, 0xEA, 0xA8, 0xAA, 0xAA, 0xA2, 0xAA, + 0xAA, 0x8A, 0xAA, 0xAA, 0x2F, 0xEB, 0xFE) + .map(_.toByte) + checkEvaluation(zorder, expected, InternalRow.fromSeq(children)) + } + + private def checkSort(input: DataFrame, expected: Seq[Row], dataType: Array[DataType]): Unit = { + withTempDir { dir => + input.repartition(3).write.mode("overwrite").format("parquet").save(dir.getCanonicalPath) + val df = spark.read.format("parquet") + .load(dir.getCanonicalPath) + .repartition(1) + assert(df.schema.fields.map(_.dataType).sameElements(dataType)) + val exprs = Seq("c1", "c2").map(col).map(_.expr) + val sortOrder = SortOrder(Zorder(exprs), Ascending, NullsLast, Seq.empty) + val zorderSort = Sort(Seq(sortOrder), true, df.logicalPlan) + val result = Dataset.ofRows(spark, zorderSort) + checkAnswer(result, expected) + } + } + + test("sort with zorder -- boolean column") { + val schema = StructType(StructField("c1", BooleanType) :: StructField("c2", BooleanType) :: Nil) + val nonNullDF = spark.createDataFrame( + spark.sparkContext.parallelize( + Seq(Row(false, false), Row(false, true), Row(true, false), Row(true, true))), + schema) + val expected = + Row(false, false) :: Row(true, false) :: Row(false, true) :: Row(true, true) :: Nil + checkSort(nonNullDF, expected, Array(BooleanType, BooleanType)) + val df = spark.createDataFrame( + spark.sparkContext.parallelize( + Seq(Row(false, false), Row(false, null), Row(null, false), Row(null, null))), + schema) + val expected2 = + Row(false, false) :: Row(null, false) :: Row(false, null) :: Row(null, null) :: Nil + checkSort(df, expected2, Array(BooleanType, BooleanType)) + } + + test("sort with zorder -- int column") { + // TODO: add 
more datatype unit test + val session = spark + import session.implicits._ + // generate 4 * 4 matrix + val len = 3 + val input = spark.range(len + 1).selectExpr("cast(id as int) as c1") + .select($"c1", explode(sequence(lit(0), lit(len))) as "c2") + val expected = + Row(0, 0) :: Row(1, 0) :: Row(0, 1) :: Row(1, 1) :: + Row(2, 0) :: Row(3, 0) :: Row(2, 1) :: Row(3, 1) :: + Row(0, 2) :: Row(1, 2) :: Row(0, 3) :: Row(1, 3) :: + Row(2, 2) :: Row(3, 2) :: Row(2, 3) :: Row(3, 3) :: Nil + checkSort(input, expected, Array(IntegerType, IntegerType)) + + // contains null value case. + val nullDF = spark.range(1).selectExpr("cast(null as int) as c1") + val input2 = spark.range(len).selectExpr("cast(id as int) as c1") + .union(nullDF) + .select( + $"c1", + explode(concat(sequence(lit(0), lit(len - 1)), array(lit(null)))) as "c2") + val expected2 = Row(0, 0) :: Row(1, 0) :: Row(0, 1) :: Row(1, 1) :: + Row(2, 0) :: Row(2, 1) :: Row(0, 2) :: Row(1, 2) :: + Row(2, 2) :: Row(null, 0) :: Row(null, 1) :: Row(null, 2) :: + Row(0, null) :: Row(1, null) :: Row(2, null) :: Row(null, null) :: Nil + checkSort(input2, expected2, Array(IntegerType, IntegerType)) + } + + test("sort with zorder -- string column") { + val schema = StructType(StructField("c1", StringType) :: StructField("c2", StringType) :: Nil) + val rdd = spark.sparkContext.parallelize(Seq( + Row("a", "a"), + Row("a", "b"), + Row("a", "c"), + Row("a", "d"), + Row("b", "a"), + Row("b", "b"), + Row("b", "c"), + Row("b", "d"), + Row("c", "a"), + Row("c", "b"), + Row("c", "c"), + Row("c", "d"), + Row("d", "a"), + Row("d", "b"), + Row("d", "c"), + Row("d", "d"))) + val input = spark.createDataFrame(rdd, schema) + val expected = Row("a", "a") :: Row("b", "a") :: Row("c", "a") :: Row("a", "b") :: + Row("a", "c") :: Row("b", "b") :: Row("c", "b") :: Row("b", "c") :: + Row("c", "c") :: Row("d", "a") :: Row("d", "b") :: Row("d", "c") :: + Row("a", "d") :: Row("b", "d") :: Row("c", "d") :: Row("d", "d") :: Nil + checkSort(input, expected, Array(StringType, StringType)) + + val rdd2 = spark.sparkContext.parallelize(Seq( + Row(null, "a"), + Row("a", "b"), + Row("a", "c"), + Row("a", null), + Row("b", "a"), + Row(null, "b"), + Row("b", null), + Row("b", "d"), + Row("c", "a"), + Row("c", null), + Row(null, "c"), + Row("c", "d"), + Row("d", null), + Row("d", "b"), + Row("d", "c"), + Row(null, "d"), + Row(null, null))) + val input2 = spark.createDataFrame(rdd2, schema) + val expected2 = Row("b", "a") :: Row("c", "a") :: Row("a", "b") :: Row("a", "c") :: + Row("d", "b") :: Row("d", "c") :: Row("b", "d") :: Row("c", "d") :: + Row(null, "a") :: Row(null, "b") :: Row(null, "c") :: Row(null, "d") :: + Row("a", null) :: Row("b", null) :: Row("c", null) :: Row("d", null) :: + Row(null, null) :: Nil + checkSort(input2, expected2, Array(StringType, StringType)) + } + + test("test special value of short int long type") { + val df1 = spark.createDataFrame(Seq( + (-1, -1L), + (Int.MinValue, Int.MinValue.toLong), + (1, 1L), + (Int.MaxValue - 1, Int.MaxValue.toLong), + (Int.MaxValue - 1, Int.MaxValue.toLong - 1), + (Int.MaxValue, Int.MaxValue.toLong + 1), + (Int.MaxValue, Int.MaxValue.toLong))).toDF("c1", "c2") + val expected1 = + Row(Int.MinValue, Int.MinValue.toLong) :: + Row(-1, -1L) :: + Row(1, 1L) :: + Row(Int.MaxValue - 1, Int.MaxValue.toLong - 1) :: + Row(Int.MaxValue - 1, Int.MaxValue.toLong) :: + Row(Int.MaxValue, Int.MaxValue.toLong) :: + Row(Int.MaxValue, Int.MaxValue.toLong + 1) :: Nil + checkSort(df1, expected1, Array(IntegerType, LongType)) + + val df2 = 
spark.createDataFrame(Seq( + (-1, -1.toShort), + (Short.MinValue.toInt, Short.MinValue), + (1, 1.toShort), + (Short.MaxValue.toInt, (Short.MaxValue - 1).toShort), + (Short.MaxValue.toInt + 1, (Short.MaxValue - 1).toShort), + (Short.MaxValue.toInt, Short.MaxValue), + (Short.MaxValue.toInt + 1, Short.MaxValue))).toDF("c1", "c2") + val expected2 = + Row(Short.MinValue.toInt, Short.MinValue) :: + Row(-1, -1.toShort) :: + Row(1, 1.toShort) :: + Row(Short.MaxValue.toInt, Short.MaxValue - 1) :: + Row(Short.MaxValue.toInt, Short.MaxValue) :: + Row(Short.MaxValue.toInt + 1, Short.MaxValue - 1) :: + Row(Short.MaxValue.toInt + 1, Short.MaxValue) :: Nil + checkSort(df2, expected2, Array(IntegerType, ShortType)) + + val df3 = spark.createDataFrame(Seq( + (-1L, -1.toShort), + (Short.MinValue.toLong, Short.MinValue), + (1L, 1.toShort), + (Short.MaxValue.toLong, (Short.MaxValue - 1).toShort), + (Short.MaxValue.toLong + 1, (Short.MaxValue - 1).toShort), + (Short.MaxValue.toLong, Short.MaxValue), + (Short.MaxValue.toLong + 1, Short.MaxValue))).toDF("c1", "c2") + val expected3 = + Row(Short.MinValue.toLong, Short.MinValue) :: + Row(-1L, -1.toShort) :: + Row(1L, 1.toShort) :: + Row(Short.MaxValue.toLong, Short.MaxValue - 1) :: + Row(Short.MaxValue.toLong, Short.MaxValue) :: + Row(Short.MaxValue.toLong + 1, Short.MaxValue - 1) :: + Row(Short.MaxValue.toLong + 1, Short.MaxValue) :: Nil + checkSort(df3, expected3, Array(LongType, ShortType)) + } + + test("skip zorder if only requires one column") { + withTable("t") { + withSQLConf("spark.sql.hive.convertMetastoreParquet" -> "false") { + sql("CREATE TABLE t (c1 int, c2 string) stored as parquet") + val order1 = sql("OPTIMIZE t ZORDER BY c1").queryExecution.analyzed + .asInstanceOf[OptimizeZorderCommandBase].query.asInstanceOf[Sort].order.head.child + assert(!order1.isInstanceOf[Zorder]) + assert(order1.isInstanceOf[AttributeReference]) + } + } + } + + test("Add config to control if zorder using global sort") { + withTable("t") { + withSQLConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED.key -> "false") { + sql( + """ + |CREATE TABLE t (c1 int, c2 string) TBLPROPERTIES ( + |'kyuubi.zorder.enabled'= 'true', + |'kyuubi.zorder.cols'= 'c1,c2') + |""".stripMargin) + val p1 = sql("OPTIMIZE t ZORDER BY c1, c2").queryExecution.analyzed + assert(p1.collect { + case shuffle: Sort if !shuffle.global => shuffle + }.size == 1) + + val p2 = sql("INSERT INTO TABLE t SELECT * FROM VALUES(1,'a')").queryExecution.analyzed + assert(p2.collect { + case shuffle: Sort if !shuffle.global => shuffle + }.size == 1) + } + } + } + + test("fast approach test") { + Seq[Seq[Any]]( + Seq(1L, 2L), + Seq(1L, 2L, 3L), + Seq(1L, 2L, 3L, 4L), + Seq(1L, 2L, 3L, 4L, 5L), + Seq(1L, 2L, 3L, 4L, 5L, 6L), + Seq(1L, 2L, 3L, 4L, 5L, 6L, 7L), + Seq(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L)) + .foreach { inputs => + assert(java.util.Arrays.equals( + ZorderBytesUtils.interleaveBits(inputs.toArray), + ZorderBytesUtils.interleaveBitsDefault(inputs.map(ZorderBytesUtils.toByteArray).toArray))) + } + } + + test("OPTIMIZE command is parsed as expected") { + val parser = createParser + val globalSort = spark.conf.get(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED) + + assert(parser.parsePlan("OPTIMIZE p zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("p")))))) + + assert(parser.parsePlan("OPTIMIZE p zorder by c1, c2") === + OptimizeZorderStatement( + 
Seq("p"), + Sort( + SortOrder( + Zorder(Seq(UnresolvedAttribute("c1"), UnresolvedAttribute("c2"))), + Ascending, + NullsLast, + Seq.empty) :: Nil, + globalSort, + Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("p")))))) + + assert(parser.parsePlan("OPTIMIZE p where id = 1 zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo(UnresolvedAttribute("id"), Literal(1)), + UnresolvedRelation(TableIdentifier("p"))))))) + + assert(parser.parsePlan("OPTIMIZE p where id = 1 zorder by c1, c2") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder( + Zorder(Seq(UnresolvedAttribute("c1"), UnresolvedAttribute("c2"))), + Ascending, + NullsLast, + Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo(UnresolvedAttribute("id"), Literal(1)), + UnresolvedRelation(TableIdentifier("p"))))))) + + assert(parser.parsePlan("OPTIMIZE p where id = current_date() zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo( + UnresolvedAttribute("id"), + UnresolvedFunction("current_date", Seq.empty, false)), + UnresolvedRelation(TableIdentifier("p"))))))) + + // TODO: add following case support + intercept[ParseException] { + parser.parsePlan("OPTIMIZE p zorder by (c1)") + } + + intercept[ParseException] { + parser.parsePlan("OPTIMIZE p zorder by (c1, c2)") + } + } + + test("OPTIMIZE partition predicates constraint") { + withTable("p") { + sql("CREATE TABLE p (c1 INT, c2 INT) PARTITIONED BY (event_date DATE)") + val e1 = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE p WHERE event_date = current_date as c ZORDER BY c1, c2") + } + assert(e1.getMessage.contains("unsupported partition predicates")) + + val e2 = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE p WHERE c1 = 1 ZORDER BY c1, c2") + } + assert(e2.getMessage == "Only partition column filters are allowed") + } + } + + def createParser: ParserInterface +} + +trait ZorderWithCodegenEnabledSuiteBase extends ZorderSuiteBase { + override def sparkConf(): SparkConf = { + val conf = super.sparkConf + conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true") + conf + } +} + +trait ZorderWithCodegenDisabledSuiteBase extends ZorderSuiteBase { + override def sparkConf(): SparkConf = { + val conf = super.sparkConf + conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "false") + conf.set(SQLConf.CODEGEN_FACTORY_MODE.key, "NO_CODEGEN") + conf + } +} diff --git a/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala new file mode 100644 index 00000000000..b891a7224a0 --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-3-4/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.benchmark + +import java.io.{File, FileOutputStream, OutputStream} + +import scala.collection.JavaConverters._ + +import com.google.common.reflect.ClassPath +import org.scalatest.Assertions._ + +trait KyuubiBenchmarkBase { + var output: Option[OutputStream] = None + + private val prefix = { + val benchmarkClasses = ClassPath.from(Thread.currentThread.getContextClassLoader) + .getTopLevelClassesRecursive("org.apache.spark.sql").asScala.toArray + assert(benchmarkClasses.nonEmpty) + val benchmark = benchmarkClasses.find(_.load().getName.endsWith("Benchmark")) + val targetDirOrProjDir = + new File(benchmark.get.load().getProtectionDomain.getCodeSource.getLocation.toURI) + .getParentFile.getParentFile + if (targetDirOrProjDir.getName == "target") { + targetDirOrProjDir.getParentFile.getCanonicalPath + "/" + } else { + targetDirOrProjDir.getCanonicalPath + "/" + } + } + + def withHeader(func: => Unit): Unit = { + val version = System.getProperty("java.version").split("\\D+")(0).toInt + val jdkString = if (version > 8) s"-jdk$version" else "" + val resultFileName = + s"${this.getClass.getSimpleName.replace("$", "")}$jdkString-results.txt" + val dir = new File(s"${prefix}benchmarks/") + if (!dir.exists()) { + // scalastyle:off println + println(s"Creating ${dir.getAbsolutePath} for benchmark results.") + // scalastyle:on println + dir.mkdirs() + } + val file = new File(dir, resultFileName) + if (!file.exists()) { + file.createNewFile() + } + output = Some(new FileOutputStream(file)) + + func + + output.foreach { o => + if (o != null) { + o.close() + } + } + } +} diff --git a/extensions/spark/kyuubi-extension-spark-common/pom.xml b/extensions/spark/kyuubi-extension-spark-common/pom.xml index 6d4bd144369..259931a2e2f 100644 --- a/extensions/spark/kyuubi-extension-spark-common/pom.xml +++ b/extensions/spark/kyuubi-extension-spark-common/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-extension-spark-common_2.12 + kyuubi-extension-spark-common_${scala.binary.version} jar Kyuubi Dev Spark Extensions Common (for Spark 3) https://kyuubi.apache.org/ @@ -110,10 +110,21 @@ jakarta.xml.bind-api test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + - org.antlr diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 b/extensions/spark/kyuubi-extension-spark-common/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 index 63e2bf84813..e52b7f5cfeb 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/antlr4/org/apache/kyuubi/sql/KyuubiSparkSQL.g4 @@ -55,53 +55,23 @@ statement ; whereClause - : WHERE booleanExpression + : WHERE partitionPredicate = predicateToken ; zorderClause : ZORDER BY order+=multipartIdentifier (',' order+=multipartIdentifier)* ; -booleanExpression - : query #logicalQuery - | left=booleanExpression operator=AND 
right=booleanExpression #logicalBinary - | left=booleanExpression operator=OR right=booleanExpression #logicalBinary - ; - -query - : '('? multipartIdentifier comparisonOperator constant ')'? - ; - -comparisonOperator - : EQ | NEQ | NEQJ | LT | LTE | GT | GTE | NSEQ - ; - -constant - : NULL #nullLiteral - | identifier STRING #typeConstructor - | number #numericLiteral - | booleanValue #booleanLiteral - | STRING+ #stringLiteral +// We don't have an expression rule in our grammar here, so we just grab the tokens and defer +// parsing them to later. +predicateToken + : .+? ; multipartIdentifier : parts+=identifier ('.' parts+=identifier)* ; -booleanValue - : TRUE | FALSE - ; - -number - : MINUS? DECIMAL_VALUE #decimalLiteral - | MINUS? INTEGER_VALUE #integerLiteral - | MINUS? BIGINT_LITERAL #bigIntLiteral - | MINUS? SMALLINT_LITERAL #smallIntLiteral - | MINUS? TINYINT_LITERAL #tinyIntLiteral - | MINUS? DOUBLE_LITERAL #doubleLiteral - | MINUS? BIGDECIMAL_LITERAL #bigDecimalLiteral - ; - identifier : strictIdentifier ; @@ -136,7 +106,6 @@ BY: 'BY'; FALSE: 'FALSE'; DATE: 'DATE'; INTERVAL: 'INTERVAL'; -NULL: 'NULL'; OPTIMIZE: 'OPTIMIZE'; OR: 'OR'; TABLE: 'TABLE'; @@ -145,22 +114,8 @@ TRUE: 'TRUE'; WHERE: 'WHERE'; ZORDER: 'ZORDER'; -EQ : '=' | '=='; -NSEQ: '<=>'; -NEQ : '<>'; -NEQJ: '!='; -LT : '<'; -LTE : '<=' | '!>'; -GT : '>'; -GTE : '>=' | '!<'; - MINUS: '-'; -STRING - : '\'' ( ~('\''|'\\') | ('\\' .) )* '\'' - | '"' ( ~('"'|'\\') | ('\\' .) )* '"' - ; - BIGINT_LITERAL : DIGIT+ 'L' ; diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala index 360a2645e50..fee65b35082 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiQueryStagePreparation.scala @@ -133,7 +133,9 @@ case class FinalStageConfigIsolation(session: SparkSession) extends Rule[SparkPl reusedExchangeExec // query stage is leaf node so we need to transform it manually - case queryStage: QueryStageExec => + // compatible with Spark 3.5: + // SPARK-42101: table cache is a independent query stage, so do not need include it. + case queryStage: QueryStageExec if queryStage.nodeName != "TableCacheQueryStage" => queryStageNum += 1 collectNumber(queryStage.plan) queryStage diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala index 0fe9f649eaa..6f45dae126e 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala @@ -17,6 +17,7 @@ package org.apache.kyuubi.sql +import org.apache.spark.network.util.ByteUnit import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf._ @@ -33,7 +34,8 @@ object KyuubiSQLConf { buildConf("spark.sql.optimizer.insertRepartitionNum") .doc(s"The partition number if ${INSERT_REPARTITION_BEFORE_WRITE.key} is enabled. " + s"If AQE is disabled, the default value is ${SQLConf.SHUFFLE_PARTITIONS.key}. 
" + - "If AQE is enabled, the default value is none that means depend on AQE.") + "If AQE is enabled, the default value is none that means depend on AQE. " + + "This config is used for Spark 3.1 only.") .version("1.2.0") .intConf .createOptional @@ -138,13 +140,23 @@ object KyuubiSQLConf { val WATCHDOG_MAX_PARTITIONS = buildConf("spark.sql.watchdog.maxPartitions") .doc("Set the max partition number when spark scans a data source. " + - "Enable MaxPartitionStrategy by specifying this configuration. " + + "Enable maxPartitions Strategy by specifying this configuration. " + "Add maxPartitions Strategy to avoid scan excessive partitions " + "on partitioned table, it's optional that works with defined") .version("1.4.0") .intConf .createOptional + val WATCHDOG_MAX_FILE_SIZE = + buildConf("spark.sql.watchdog.maxFileSize") + .doc("Set the maximum size in bytes of files when spark scans a data source. " + + "Enable maxFileSize Strategy by specifying this configuration. " + + "Add maxFileSize Strategy to avoid scan excessive size of files," + + " it's optional that works with defined") + .version("1.8.0") + .bytesConf(ByteUnit.BYTE) + .createOptional + val WATCHDOG_FORCED_MAXOUTPUTROWS = buildConf("spark.sql.watchdog.forcedMaxOutputRows") .doc("Add ForcedMaxOutputRows rule to avoid huge output rows of non-limit query " + @@ -190,4 +202,75 @@ object KyuubiSQLConf { .version("1.7.0") .booleanConf .createWithDefault(true) + + val FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_ENABLED = + buildConf("spark.sql.finalWriteStage.eagerlyKillExecutors.enabled") + .doc("When true, eagerly kill redundant executors before running final write stage.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_EAGERLY_KILL_EXECUTORS_KILL_ALL = + buildConf("spark.sql.finalWriteStage.eagerlyKillExecutors.killAll") + .doc("When true, eagerly kill all executors before running final write stage. " + + "Mainly for test.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE = + buildConf("spark.sql.finalWriteStage.skipKillingExecutorsForTableCache") + .doc("When true, skip killing executors if the plan has table caches.") + .version("1.8.0") + .booleanConf + .createWithDefault(true) + + val FINAL_WRITE_STAGE_PARTITION_FACTOR = + buildConf("spark.sql.finalWriteStage.retainExecutorsFactor") + .doc("If the target executors * factor < active executors, and " + + "target executors * factor > min executors, then kill redundant executors.") + .version("1.8.0") + .doubleConf + .checkValue(_ >= 1, "must be bigger than or equal to 1") + .createWithDefault(1.2) + + val FINAL_WRITE_STAGE_RESOURCE_ISOLATION_ENABLED = + buildConf("spark.sql.finalWriteStage.resourceIsolation.enabled") + .doc( + "When true, make final write stage resource isolation using custom RDD resource profile.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val FINAL_WRITE_STAGE_EXECUTOR_CORES = + buildConf("spark.sql.finalWriteStage.executorCores") + .doc("Specify the executor core request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .intConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_MEMORY = + buildConf("spark.sql.finalWriteStage.executorMemory") + .doc("Specify the executor on heap memory request for final write stage. 
" + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_MEMORY_OVERHEAD = + buildConf("spark.sql.finalWriteStage.executorMemoryOverhead") + .doc("Specify the executor memory overhead request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional + + val FINAL_WRITE_STAGE_EXECUTOR_OFF_HEAP_MEMORY = + buildConf("spark.sql.finalWriteStage.executorOffHeapMemory") + .doc("Specify the executor off heap memory request for final write stage. " + + "It would be passed to the RDD resource profile.") + .version("1.8.0") + .stringConf + .createOptional } diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala index 9f1958b0905..cc00bf88e94 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSparkSQLAstBuilder.scala @@ -17,37 +17,81 @@ package org.apache.kyuubi.sql -import java.time.LocalDate -import java.util.Locale - import scala.collection.JavaConverters.asScalaBufferConverter -import scala.collection.mutable.{ArrayBuffer, ListBuffer} -import scala.util.control.NonFatal +import scala.collection.mutable.ListBuffer import org.antlr.v4.runtime.ParserRuleContext -import org.antlr.v4.runtime.tree.{ParseTree, TerminalNode} -import org.apache.commons.codec.binary.Hex -import org.apache.spark.sql.AnalysisException +import org.antlr.v4.runtime.misc.Interval +import org.antlr.v4.runtime.tree.ParseTree +import org.apache.spark.sql.catalyst.SQLConfHelper import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedRelation, UnresolvedStar} import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.parser.ParseException -import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, stringWithoutUnescape, withOrigin} +import org.apache.spark.sql.catalyst.parser.ParserUtils.withOrigin import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project, Sort} -import org.apache.spark.sql.catalyst.util.DateTimeUtils.{getZoneId, localDateToDays, stringToTimestamp} -import org.apache.spark.sql.catalyst.util.IntervalUtils -import org.apache.spark.sql.hive.HiveAnalysis.conf -import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.sql.types._ -import org.apache.spark.unsafe.types.UTF8String import org.apache.kyuubi.sql.KyuubiSparkSQLParser._ -import org.apache.kyuubi.sql.zorder.{OptimizeZorderStatement, OptimizeZorderStatementBase, Zorder, ZorderBase} +import org.apache.kyuubi.sql.zorder.{OptimizeZorderStatement, Zorder} + +class KyuubiSparkSQLAstBuilder extends KyuubiSparkSQLBaseVisitor[AnyRef] with SQLConfHelper { + + def buildOptimizeStatement( + unparsedPredicateOptimize: UnparsedPredicateOptimize, + parseExpression: String => Expression): LogicalPlan = { -abstract class KyuubiSparkSQLAstBuilderBase extends KyuubiSparkSQLBaseVisitor[AnyRef] { - def buildZorder(child: Seq[Expression]): ZorderBase - def buildOptimizeZorderStatement( - tableIdentifier: Seq[String], - query: LogicalPlan): OptimizeZorderStatementBase + val UnparsedPredicateOptimize(tableIdent, tablePredicate, orderExpr) = + unparsedPredicateOptimize + + val 
predicate = tablePredicate.map(parseExpression) + verifyPartitionPredicates(predicate) + val table = UnresolvedRelation(tableIdent) + val tableWithFilter = predicate match { + case Some(expr) => Filter(expr, table) + case None => table + } + val query = + Sort( + SortOrder(orderExpr, Ascending, NullsLast, Seq.empty) :: Nil, + conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED), + Project(Seq(UnresolvedStar(None)), tableWithFilter)) + OptimizeZorderStatement(tableIdent, query) + } + + private def verifyPartitionPredicates(predicates: Option[Expression]): Unit = { + predicates.foreach { + case p if !isLikelySelective(p) => + throw new KyuubiSQLExtensionException(s"unsupported partition predicates: ${p.sql}") + case _ => + } + } + + /** + * Forked from Apache Spark's org.apache.spark.sql.catalyst.expressions.PredicateHelper + * The `PredicateHelper.isLikelySelective()` is available since Spark-3.3, forked for Spark + * that is lower than 3.3. + * + * Returns whether an expression is likely to be selective + */ + private def isLikelySelective(e: Expression): Boolean = e match { + case Not(expr) => isLikelySelective(expr) + case And(l, r) => isLikelySelective(l) || isLikelySelective(r) + case Or(l, r) => isLikelySelective(l) && isLikelySelective(r) + case _: StringRegexExpression => true + case _: BinaryComparison => true + case _: In | _: InSet => true + case _: StringPredicate => true + case BinaryPredicate(_) => true + case _: MultiLikeBase => true + case _ => false + } + + private object BinaryPredicate { + def unapply(expr: Expression): Option[Expression] = expr match { + case _: Contains => Option(expr) + case _: StartsWith => Option(expr) + case _: EndsWith => Option(expr) + case _ => None + } + } /** * Create an expression from the given context. 
This method just passes the context on to the @@ -62,21 +106,12 @@ abstract class KyuubiSparkSQLAstBuilderBase extends KyuubiSparkSQLBaseVisitor[An } override def visitOptimizeZorder( - ctx: OptimizeZorderContext): LogicalPlan = withOrigin(ctx) { + ctx: OptimizeZorderContext): UnparsedPredicateOptimize = withOrigin(ctx) { val tableIdent = multiPart(ctx.multipartIdentifier()) - val table = UnresolvedRelation(tableIdent) - - val whereClause = - if (ctx.whereClause() == null) { - None - } else { - Option(expression(ctx.whereClause().booleanExpression())) - } - val tableWithFilter = whereClause match { - case Some(expr) => Filter(expr, table) - case None => table - } + val predicate = Option(ctx.whereClause()) + .map(_.partitionPredicate) + .map(extractRawText(_)) val zorderCols = ctx.zorderClause().order.asScala .map(visitMultipartIdentifier) @@ -87,364 +122,53 @@ abstract class KyuubiSparkSQLAstBuilderBase extends KyuubiSparkSQLBaseVisitor[An if (zorderCols.length == 1) { zorderCols.head } else { - buildZorder(zorderCols) + Zorder(zorderCols) } - val query = - Sort( - SortOrder(orderExpr, Ascending, NullsLast, Seq.empty) :: Nil, - conf.getConf(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED), - Project(Seq(UnresolvedStar(None)), tableWithFilter)) - - buildOptimizeZorderStatement(tableIdent, query) + UnparsedPredicateOptimize(tableIdent, predicate, orderExpr) } override def visitPassThrough(ctx: PassThroughContext): LogicalPlan = null - override def visitQuery(ctx: QueryContext): Expression = withOrigin(ctx) { - val left = new UnresolvedAttribute(multiPart(ctx.multipartIdentifier())) - val right = expression(ctx.constant()) - val operator = ctx.comparisonOperator().getChild(0).asInstanceOf[TerminalNode] - operator.getSymbol.getType match { - case KyuubiSparkSQLParser.EQ => - EqualTo(left, right) - case KyuubiSparkSQLParser.NSEQ => - EqualNullSafe(left, right) - case KyuubiSparkSQLParser.NEQ | KyuubiSparkSQLParser.NEQJ => - Not(EqualTo(left, right)) - case KyuubiSparkSQLParser.LT => - LessThan(left, right) - case KyuubiSparkSQLParser.LTE => - LessThanOrEqual(left, right) - case KyuubiSparkSQLParser.GT => - GreaterThan(left, right) - case KyuubiSparkSQLParser.GTE => - GreaterThanOrEqual(left, right) - } - } - - override def visitLogicalBinary(ctx: LogicalBinaryContext): Expression = withOrigin(ctx) { - val expressionType = ctx.operator.getType - val expressionCombiner = expressionType match { - case KyuubiSparkSQLParser.AND => And.apply _ - case KyuubiSparkSQLParser.OR => Or.apply _ - } - - // Collect all similar left hand contexts. - val contexts = ArrayBuffer(ctx.right) - var current = ctx.left - def collectContexts: Boolean = current match { - case lbc: LogicalBinaryContext if lbc.operator.getType == expressionType => - contexts += lbc.right - current = lbc.left - true - case _ => - contexts += current - false - } - while (collectContexts) { - // No body - all updates take place in the collectContexts. - } - - // Reverse the contexts to have them in the same sequence as in the SQL statement & turn them - // into expressions. - val expressions = contexts.reverseMap(expression) - - // Create a balanced tree. 
- def reduceToExpressionTree(low: Int, high: Int): Expression = high - low match { - case 0 => - expressions(low) - case 1 => - expressionCombiner(expressions(low), expressions(high)) - case x => - val mid = low + x / 2 - expressionCombiner( - reduceToExpressionTree(low, mid), - reduceToExpressionTree(mid + 1, high)) - } - reduceToExpressionTree(0, expressions.size - 1) - } - override def visitMultipartIdentifier(ctx: MultipartIdentifierContext): Seq[String] = withOrigin(ctx) { - ctx.parts.asScala.map(_.getText) + ctx.parts.asScala.map(_.getText).toSeq } override def visitZorderClause(ctx: ZorderClauseContext): Seq[UnresolvedAttribute] = withOrigin(ctx) { val res = ListBuffer[UnresolvedAttribute]() ctx.multipartIdentifier().forEach { identifier => - res += UnresolvedAttribute(identifier.parts.asScala.map(_.getText)) + res += UnresolvedAttribute(identifier.parts.asScala.map(_.getText).toSeq) } - res - } - - /** - * Create a NULL literal expression. - */ - override def visitNullLiteral(ctx: NullLiteralContext): Literal = withOrigin(ctx) { - Literal(null) - } - - /** - * Create a Boolean literal expression. - */ - override def visitBooleanLiteral(ctx: BooleanLiteralContext): Literal = withOrigin(ctx) { - if (ctx.getText.toBoolean) { - Literal.TrueLiteral - } else { - Literal.FalseLiteral + res.toSeq } - } - - /** - * Create a typed Literal expression. A typed literal has the following SQL syntax: - * {{{ - * [TYPE] '[VALUE]' - * }}} - * Currently Date, Timestamp, Interval and Binary typed literals are supported. - */ - override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) { - val value = string(ctx.STRING) - val valueType = ctx.identifier.getText.toUpperCase(Locale.ROOT) - - def toLiteral[T](f: UTF8String => Option[T], t: DataType): Literal = { - f(UTF8String.fromString(value)).map(Literal(_, t)).getOrElse { - throw new ParseException(s"Cannot parse the $valueType value: $value", ctx) - } - } - try { - valueType match { - case "DATE" => - toLiteral(stringToDate, DateType) - case "TIMESTAMP" => - val zoneId = getZoneId(SQLConf.get.sessionLocalTimeZone) - toLiteral(stringToTimestamp(_, zoneId), TimestampType) - case "INTERVAL" => - val interval = - try { - IntervalUtils.stringToInterval(UTF8String.fromString(value)) - } catch { - case e: IllegalArgumentException => - val ex = new ParseException("Cannot parse the INTERVAL value: " + value, ctx) - ex.setStackTrace(e.getStackTrace) - throw ex - } - Literal(interval, CalendarIntervalType) - case "X" => - val padding = if (value.length % 2 != 0) "0" else "" - - Literal(Hex.decodeHex(padding + value)) - case other => - throw new ParseException(s"Literals of type '$other' are currently not supported.", ctx) - } - } catch { - case e: IllegalArgumentException => - val message = Option(e.getMessage).getOrElse(s"Exception parsing $valueType") - throw new ParseException(message, ctx) - } - } - - /** - * Create a String literal expression. - */ - override def visitStringLiteral(ctx: StringLiteralContext): Literal = withOrigin(ctx) { - Literal(createString(ctx)) - } - - /** - * Create a decimal literal for a regular decimal number. - */ - override def visitDecimalLiteral(ctx: DecimalLiteralContext): Literal = withOrigin(ctx) { - Literal(BigDecimal(ctx.getText).underlying()) - } - - /** Create a numeric literal expression. 
*/ - private def numericLiteral( - ctx: NumberContext, - rawStrippedQualifier: String, - minValue: BigDecimal, - maxValue: BigDecimal, - typeName: String)(converter: String => Any): Literal = withOrigin(ctx) { - try { - val rawBigDecimal = BigDecimal(rawStrippedQualifier) - if (rawBigDecimal < minValue || rawBigDecimal > maxValue) { - throw new ParseException( - s"Numeric literal ${rawStrippedQualifier} does not " + - s"fit in range [${minValue}, ${maxValue}] for type ${typeName}", - ctx) - } - Literal(converter(rawStrippedQualifier)) - } catch { - case e: NumberFormatException => - throw new ParseException(e.getMessage, ctx) - } - } - - /** - * Create a Byte Literal expression. - */ - override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = { - val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1) - numericLiteral( - ctx, - rawStrippedQualifier, - Byte.MinValue, - Byte.MaxValue, - ByteType.simpleString)(_.toByte) - } - - /** - * Create an integral literal expression. The code selects the most narrow integral type - * possible, either a BigDecimal, a Long or an Integer is returned. - */ - override def visitIntegerLiteral(ctx: IntegerLiteralContext): Literal = withOrigin(ctx) { - BigDecimal(ctx.getText) match { - case v if v.isValidInt => - Literal(v.intValue) - case v if v.isValidLong => - Literal(v.longValue) - case v => Literal(v.underlying()) - } - } - - /** - * Create a Short Literal expression. - */ - override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = { - val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1) - numericLiteral( - ctx, - rawStrippedQualifier, - Short.MinValue, - Short.MaxValue, - ShortType.simpleString)(_.toShort) - } - - /** - * Create a Long Literal expression. - */ - override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = { - val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1) - numericLiteral( - ctx, - rawStrippedQualifier, - Long.MinValue, - Long.MaxValue, - LongType.simpleString)(_.toLong) - } - - /** - * Create a Double Literal expression. - */ - override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = { - val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1) - numericLiteral( - ctx, - rawStrippedQualifier, - Double.MinValue, - Double.MaxValue, - DoubleType.simpleString)(_.toDouble) - } - - /** - * Create a BigDecimal Literal expression. - */ - override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = { - val raw = ctx.getText.substring(0, ctx.getText.length - 2) - try { - Literal(BigDecimal(raw).underlying()) - } catch { - case e: AnalysisException => - throw new ParseException(e.message, ctx) - } - } - - /** - * Create a String from a string literal context. This supports multiple consecutive string - * literals, these are concatenated, for example this expression "'hello' 'world'" will be - * converted into "helloworld". - * - * Special characters can be escaped by using Hive/C-style escaping. - */ - private def createString(ctx: StringLiteralContext): String = { - if (conf.escapedStringLiterals) { - ctx.STRING().asScala.map(stringWithoutUnescape).mkString - } else { - ctx.STRING().asScala.map(string).mkString - } - } private def typedVisit[T](ctx: ParseTree): T = { ctx.accept(this).asInstanceOf[T] } - private def stringToDate(s: UTF8String): Option[Int] = { - def isValidDigits(segment: Int, digits: Int): Boolean = { - // An integer is able to represent a date within [+-]5 million years. 
- var maxDigitsYear = 7 - (segment == 0 && digits >= 4 && digits <= maxDigitsYear) || - (segment != 0 && digits > 0 && digits <= 2) - } - if (s == null || s.trimAll().numBytes() == 0) { - return None - } - val segments: Array[Int] = Array[Int](1, 1, 1) - var sign = 1 - var i = 0 - var currentSegmentValue = 0 - var currentSegmentDigits = 0 - val bytes = s.trimAll().getBytes - var j = 0 - if (bytes(j) == '-' || bytes(j) == '+') { - sign = if (bytes(j) == '-') -1 else 1 - j += 1 - } - while (j < bytes.length && (i < 3 && !(bytes(j) == ' ' || bytes(j) == 'T'))) { - val b = bytes(j) - if (i < 2 && b == '-') { - if (!isValidDigits(i, currentSegmentDigits)) { - return None - } - segments(i) = currentSegmentValue - currentSegmentValue = 0 - currentSegmentDigits = 0 - i += 1 - } else { - val parsedValue = b - '0'.toByte - if (parsedValue < 0 || parsedValue > 9) { - return None - } else { - currentSegmentValue = currentSegmentValue * 10 + parsedValue - currentSegmentDigits += 1 - } - } - j += 1 - } - if (!isValidDigits(i, currentSegmentDigits)) { - return None - } - if (i < 2 && j < bytes.length) { - // For the `yyyy` and `yyyy-[m]m` formats, entire input must be consumed. - return None - } - segments(i) = currentSegmentValue - try { - val localDate = LocalDate.of(sign * segments(0), segments(1), segments(2)) - Some(localDateToDays(localDate)) - } catch { - case NonFatal(_) => None - } + private def extractRawText(exprContext: ParserRuleContext): String = { + // Extract the raw expression which will be parsed later + exprContext.getStart.getInputStream.getText(new Interval( + exprContext.getStart.getStartIndex, + exprContext.getStop.getStopIndex)) } } -class KyuubiSparkSQLAstBuilder extends KyuubiSparkSQLAstBuilderBase { - override def buildZorder(child: Seq[Expression]): ZorderBase = { - Zorder(child) - } +/** + * a logical plan contains an unparsed expression that will be parsed by spark. 
+ */ +trait UnparsedExpressionLogicalPlan extends LogicalPlan { + override def output: Seq[Attribute] = throw new UnsupportedOperationException() - override def buildOptimizeZorderStatement( - tableIdentifier: Seq[String], - query: LogicalPlan): OptimizeZorderStatementBase = { - OptimizeZorderStatement(tableIdentifier, query) - } + override def children: Seq[LogicalPlan] = throw new UnsupportedOperationException() + + protected def withNewChildrenInternal( + newChildren: IndexedSeq[LogicalPlan]): LogicalPlan = + throw new UnsupportedOperationException() } + +case class UnparsedPredicateOptimize( + tableIdent: Seq[String], + tablePredicate: Option[String], + orderExpr: Expression) extends UnparsedExpressionLogicalPlan {} diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala index b3c58afdf5a..e44309192a9 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/KyuubiWatchDogException.scala @@ -23,3 +23,8 @@ final class MaxPartitionExceedException( private val reason: String = "", private val cause: Throwable = None.orNull) extends KyuubiSQLExtensionException(reason, cause) + +final class MaxFileSizeExceedException( + private val reason: String = "", + private val cause: Throwable = None.orNull) + extends KyuubiSQLExtensionException(reason, cause) diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxPartitionStrategy.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxPartitionStrategy.scala deleted file mode 100644 index 61ab07adfb1..00000000000 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxPartitionStrategy.scala +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kyuubi.sql.watchdog - -import org.apache.hadoop.fs.Path -import org.apache.spark.sql.{PruneFileSourcePartitionHelper, SparkSession, Strategy} -import org.apache.spark.sql.catalyst.SQLConfHelper -import org.apache.spark.sql.catalyst.catalog.{CatalogTable, HiveTableRelation} -import org.apache.spark.sql.catalyst.planning.ScanOperation -import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan -import org.apache.spark.sql.execution.SparkPlan -import org.apache.spark.sql.execution.datasources.{CatalogFileIndex, HadoopFsRelation, InMemoryFileIndex, LogicalRelation} -import org.apache.spark.sql.types.StructType - -import org.apache.kyuubi.sql.KyuubiSQLConf - -/** - * Add maxPartitions Strategy to avoid scan excessive partitions on partitioned table - * 1 Check if scan exceed maxPartition - * 2 Check if Using partitionFilter on partitioned table - * This Strategy Add Planner Strategy after LogicalOptimizer - */ -case class MaxPartitionStrategy(session: SparkSession) - extends Strategy - with SQLConfHelper - with PruneFileSourcePartitionHelper { - override def apply(plan: LogicalPlan): Seq[SparkPlan] = { - val maxScanPartitionsOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_MAX_PARTITIONS) - - if (maxScanPartitionsOpt.isDefined) { - checkRelationMaxPartitions(plan, maxScanPartitionsOpt.get) - } - Nil - } - - private def checkRelationMaxPartitions( - plan: LogicalPlan, - maxScanPartitions: Int): Unit = { - plan match { - case ScanOperation(_, _, relation: HiveTableRelation) if relation.isPartitioned => - relation.prunedPartitions match { - case Some(prunedPartitions) => - if (prunedPartitions.size > maxScanPartitions) { - throw new MaxPartitionExceedException( - s""" - |SQL job scan hive partition: ${prunedPartitions.size} - |exceed restrict of hive scan maxPartition $maxScanPartitions - |You should optimize your SQL logical according partition structure - |or shorten query scope such as p_date, detail as below: - |Table: ${relation.tableMeta.qualifiedName} - |Owner: ${relation.tableMeta.owner} - |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} - |""".stripMargin) - } - case _ => - val totalPartitions = session - .sessionState.catalog.externalCatalog.listPartitionNames( - relation.tableMeta.database, - relation.tableMeta.identifier.table) - if (totalPartitions.size > maxScanPartitions) { - throw new MaxPartitionExceedException( - s""" - |Your SQL job scan a whole huge table without any partition filter, - |You should optimize your SQL logical according partition structure - |or shorten query scope such as p_date, detail as below: - |Table: ${relation.tableMeta.qualifiedName} - |Owner: ${relation.tableMeta.owner} - |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} - |""".stripMargin) - } - } - case ScanOperation( - _, - filters, - relation @ LogicalRelation( - fsRelation @ HadoopFsRelation( - fileIndex: InMemoryFileIndex, - partitionSchema, - _, - _, - _, - _), - _, - _, - _)) if fsRelation.partitionSchema.nonEmpty => - val (partitionKeyFilters, dataFilter) = - getPartitionKeyFiltersAndDataFilters( - fsRelation.sparkSession, - relation, - partitionSchema, - filters, - relation.output) - val prunedPartitionSize = fileIndex.listFiles( - partitionKeyFilters.toSeq, - dataFilter) - .size - if (prunedPartitionSize > maxScanPartitions) { - throw maxPartitionExceedError( - prunedPartitionSize, - maxScanPartitions, - relation.catalogTable, - fileIndex.rootPaths, - fsRelation.partitionSchema) - } - case ScanOperation( - _, - filters, 
- logicalRelation @ LogicalRelation( - fsRelation @ HadoopFsRelation( - catalogFileIndex: CatalogFileIndex, - partitionSchema, - _, - _, - _, - _), - _, - _, - _)) if fsRelation.partitionSchema.nonEmpty => - val (partitionKeyFilters, _) = - getPartitionKeyFiltersAndDataFilters( - fsRelation.sparkSession, - logicalRelation, - partitionSchema, - filters, - logicalRelation.output) - - val prunedPartitionSize = - catalogFileIndex.filterPartitions( - partitionKeyFilters.toSeq) - .partitionSpec() - .partitions - .size - if (prunedPartitionSize > maxScanPartitions) { - throw maxPartitionExceedError( - prunedPartitionSize, - maxScanPartitions, - logicalRelation.catalogTable, - catalogFileIndex.rootPaths, - fsRelation.partitionSchema) - } - case _ => - } - } - - def maxPartitionExceedError( - prunedPartitionSize: Int, - maxPartitionSize: Int, - tableMeta: Option[CatalogTable], - rootPaths: Seq[Path], - partitionSchema: StructType): Throwable = { - val truncatedPaths = - if (rootPaths.length > 5) { - rootPaths.slice(0, 5).mkString(",") + """... """ + (rootPaths.length - 5) + " more paths" - } else { - rootPaths.mkString(",") - } - - new MaxPartitionExceedException( - s""" - |SQL job scan data source partition: $prunedPartitionSize - |exceed restrict of data source scan maxPartition $maxPartitionSize - |You should optimize your SQL logical according partition structure - |or shorten query scope such as p_date, detail as below: - |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} - |Owner: ${tableMeta.map(_.owner).getOrElse("")} - |RootPaths: $truncatedPaths - |Partition Structure: ${partitionSchema.map(_.name).mkString(", ")} - |""".stripMargin) - } -} diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala new file mode 100644 index 00000000000..0ee693fcbec --- /dev/null +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/watchdog/MaxScanStrategy.scala @@ -0,0 +1,303 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.sql.watchdog + +import org.apache.hadoop.fs.Path +import org.apache.spark.sql.{PruneFileSourcePartitionHelper, SparkSession, Strategy} +import org.apache.spark.sql.catalyst.SQLConfHelper +import org.apache.spark.sql.catalyst.catalog.{CatalogTable, HiveTableRelation} +import org.apache.spark.sql.catalyst.planning.ScanOperation +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.datasources.{CatalogFileIndex, HadoopFsRelation, InMemoryFileIndex, LogicalRelation} +import org.apache.spark.sql.types.StructType + +import org.apache.kyuubi.sql.KyuubiSQLConf + +/** + * Add MaxScanStrategy to avoid scan excessive partitions or files + * 1. Check if scan exceed maxPartition of partitioned table + * 2. Check if scan exceed maxFileSize (calculated by hive table and partition statistics) + * This Strategy Add Planner Strategy after LogicalOptimizer + * @param session + */ +case class MaxScanStrategy(session: SparkSession) + extends Strategy + with SQLConfHelper + with PruneFileSourcePartitionHelper { + override def apply(plan: LogicalPlan): Seq[SparkPlan] = { + val maxScanPartitionsOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_MAX_PARTITIONS) + val maxFileSizeOpt = conf.getConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE) + if (maxScanPartitionsOpt.isDefined || maxFileSizeOpt.isDefined) { + checkScan(plan, maxScanPartitionsOpt, maxFileSizeOpt) + } + Nil + } + + private def checkScan( + plan: LogicalPlan, + maxScanPartitionsOpt: Option[Int], + maxFileSizeOpt: Option[Long]): Unit = { + plan match { + case ScanOperation(_, _, relation: HiveTableRelation) => + if (relation.isPartitioned) { + relation.prunedPartitions match { + case Some(prunedPartitions) => + if (maxScanPartitionsOpt.exists(_ < prunedPartitions.size)) { + throw new MaxPartitionExceedException( + s""" + |SQL job scan hive partition: ${prunedPartitions.size} + |exceed restrict of hive scan maxPartition ${maxScanPartitionsOpt.get} + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + lazy val scanFileSize = prunedPartitions.flatMap(_.stats).map(_.sizeInBytes).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + Some(relation.tableMeta), + prunedPartitions.flatMap(_.storage.locationUri).map(_.toString), + relation.partitionCols.map(_.name)) + } + case _ => + lazy val scanPartitions: Int = session + .sessionState.catalog.externalCatalog.listPartitionNames( + relation.tableMeta.database, + relation.tableMeta.identifier.table).size + if (maxScanPartitionsOpt.exists(_ < scanPartitions)) { + throw new MaxPartitionExceedException( + s""" + |Your SQL job scan a whole huge table without any partition filter, + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + + lazy val scanFileSize: BigInt = + relation.tableMeta.stats.map(_.sizeInBytes).getOrElse { + session + .sessionState.catalog.externalCatalog.listPartitions( + 
relation.tableMeta.database, + relation.tableMeta.identifier.table).flatMap(_.stats).map(_.sizeInBytes).sum + } + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw new MaxFileSizeExceedException( + s""" + |Your SQL job scan a whole huge table without any partition filter, + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${relation.tableMeta.qualifiedName} + |Owner: ${relation.tableMeta.owner} + |Partition Structure: ${relation.partitionCols.map(_.name).mkString(", ")} + |""".stripMargin) + } + } + } else { + lazy val scanFileSize = relation.tableMeta.stats.map(_.sizeInBytes).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + Some(relation.tableMeta)) + } + } + case ScanOperation( + _, + filters, + relation @ LogicalRelation( + fsRelation @ HadoopFsRelation( + fileIndex: InMemoryFileIndex, + partitionSchema, + _, + _, + _, + _), + _, + _, + _)) => + if (fsRelation.partitionSchema.nonEmpty) { + val (partitionKeyFilters, dataFilter) = + getPartitionKeyFiltersAndDataFilters( + fsRelation.sparkSession, + relation, + partitionSchema, + filters, + relation.output) + val prunedPartitions = fileIndex.listFiles( + partitionKeyFilters.toSeq, + dataFilter) + if (maxScanPartitionsOpt.exists(_ < prunedPartitions.size)) { + throw maxPartitionExceedError( + prunedPartitions.size, + maxScanPartitionsOpt.get, + relation.catalogTable, + fileIndex.rootPaths, + fsRelation.partitionSchema) + } + lazy val scanFileSize = prunedPartitions.flatMap(_.files).map(_.getLen).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + relation.catalogTable, + fileIndex.rootPaths.map(_.toString), + fsRelation.partitionSchema.map(_.name)) + } + } else { + lazy val scanFileSize = fileIndex.sizeInBytes + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + relation.catalogTable) + } + } + case ScanOperation( + _, + filters, + logicalRelation @ LogicalRelation( + fsRelation @ HadoopFsRelation( + catalogFileIndex: CatalogFileIndex, + partitionSchema, + _, + _, + _, + _), + _, + _, + _)) => + if (fsRelation.partitionSchema.nonEmpty) { + val (partitionKeyFilters, _) = + getPartitionKeyFiltersAndDataFilters( + fsRelation.sparkSession, + logicalRelation, + partitionSchema, + filters, + logicalRelation.output) + + val fileIndex = catalogFileIndex.filterPartitions( + partitionKeyFilters.toSeq) + + lazy val prunedPartitionSize = fileIndex.partitionSpec().partitions.size + if (maxScanPartitionsOpt.exists(_ < prunedPartitionSize)) { + throw maxPartitionExceedError( + prunedPartitionSize, + maxScanPartitionsOpt.get, + logicalRelation.catalogTable, + catalogFileIndex.rootPaths, + fsRelation.partitionSchema) + } + + lazy val scanFileSize = fileIndex + .listFiles(Nil, Nil).flatMap(_.files).map(_.getLen).sum + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw partTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + logicalRelation.catalogTable, + catalogFileIndex.rootPaths.map(_.toString), + fsRelation.partitionSchema.map(_.name)) + } + } else { + lazy val scanFileSize = catalogFileIndex.sizeInBytes + if (maxFileSizeOpt.exists(_ < scanFileSize)) { + throw nonPartTableMaxFileExceedError( + scanFileSize, + maxFileSizeOpt.get, + logicalRelation.catalogTable) + } + } + case _ => + } + } + + def 
maxPartitionExceedError( + prunedPartitionSize: Int, + maxPartitionSize: Int, + tableMeta: Option[CatalogTable], + rootPaths: Seq[Path], + partitionSchema: StructType): Throwable = { + val truncatedPaths = + if (rootPaths.length > 5) { + rootPaths.slice(0, 5).mkString(",") + """... """ + (rootPaths.length - 5) + " more paths" + } else { + rootPaths.mkString(",") + } + + new MaxPartitionExceedException( + s""" + |SQL job scan data source partition: $prunedPartitionSize + |exceed restrict of data source scan maxPartition $maxPartitionSize + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |RootPaths: $truncatedPaths + |Partition Structure: ${partitionSchema.map(_.name).mkString(", ")} + |""".stripMargin) + } + + private def partTableMaxFileExceedError( + scanFileSize: Number, + maxFileSize: Long, + tableMeta: Option[CatalogTable], + rootPaths: Seq[String], + partitions: Seq[String]): Throwable = { + val truncatedPaths = + if (rootPaths.length > 5) { + rootPaths.slice(0, 5).mkString(",") + """... """ + (rootPaths.length - 5) + " more paths" + } else { + rootPaths.mkString(",") + } + + new MaxFileSizeExceedException( + s""" + |SQL job scan file size in bytes: $scanFileSize + |exceed restrict of table scan maxFileSize $maxFileSize + |You should optimize your SQL logical according partition structure + |or shorten query scope such as p_date, detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |RootPaths: $truncatedPaths + |Partition Structure: ${partitions.mkString(", ")} + |""".stripMargin) + } + + private def nonPartTableMaxFileExceedError( + scanFileSize: Number, + maxFileSize: Long, + tableMeta: Option[CatalogTable]): Throwable = { + new MaxFileSizeExceedException( + s""" + |SQL job scan file size in bytes: $scanFileSize + |exceed restrict of table scan maxFileSize $maxFileSize + |detail as below: + |Table: ${tableMeta.map(_.qualifiedName).getOrElse("")} + |Owner: ${tableMeta.map(_.owner).getOrElse("")} + |Location: ${tableMeta.map(_.location).getOrElse("")} + |""".stripMargin) + } +} diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala index a9bb5a5d758..895f9e24be3 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/OptimizeZorderStatementBase.scala @@ -20,24 +20,15 @@ package org.apache.kyuubi.sql.zorder import org.apache.spark.sql.catalyst.expressions.Attribute import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UnaryNode} -/** - * A zorder statement that contains we parsed from SQL. - * We should convert this plan to certain command at Analyzer. - */ -abstract class OptimizeZorderStatementBase extends UnaryNode { - def tableIdentifier: Seq[String] - def query: LogicalPlan - override def child: LogicalPlan = query - override def output: Seq[Attribute] = child.output -} - /** * A zorder statement that contains we parsed from SQL. * We should convert this plan to certain command at Analyzer. 
*/ case class OptimizeZorderStatement( tableIdentifier: Seq[String], - query: LogicalPlan) extends OptimizeZorderStatementBase { + query: LogicalPlan) extends UnaryNode { + override def child: LogicalPlan = query + override def output: Seq[Attribute] = child.output protected def withNewChildInternal(newChild: LogicalPlan): LogicalPlan = copy(query = newChild) } diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala index cdead0b06d2..9f735caa7a7 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/zorder/ResolveZorderBase.scala @@ -57,7 +57,7 @@ abstract class ResolveZorderBase extends Rule[LogicalPlan] { } override def apply(plan: LogicalPlan): LogicalPlan = plan match { - case statement: OptimizeZorderStatementBase if statement.query.resolved => + case statement: OptimizeZorderStatement if statement.query.resolved => checkQueryAllowed(statement.query) val tableIdentifier = getTableIdentifier(statement.tableIdentifier) val catalogTable = session.sessionState.catalog.getTableMetadata(tableIdentifier) diff --git a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala index fd81948c61a..e58ac726c13 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/KyuubiSparkSQLExtensionTest.scala @@ -29,6 +29,8 @@ import org.apache.kyuubi.sql.KyuubiSQLConf trait KyuubiSparkSQLExtensionTest extends QueryTest with SQLTestUtils with AdaptiveSparkPlanHelper { + sys.props.put("spark.testing", "1") + private var _spark: Option[SparkSession] = None protected def spark: SparkSession = _spark.getOrElse { throw new RuntimeException("test spark session don't initial before using it.") diff --git a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala index e6ecd28c940..a202e813c5e 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/WatchDogSuiteBase.scala @@ -17,10 +17,15 @@ package org.apache.spark.sql +import java.io.File + +import scala.collection.JavaConverters._ + +import org.apache.commons.io.FileUtils import org.apache.spark.sql.catalyst.plans.logical.{GlobalLimit, LogicalPlan} import org.apache.kyuubi.sql.KyuubiSQLConf -import org.apache.kyuubi.sql.watchdog.MaxPartitionExceedException +import org.apache.kyuubi.sql.watchdog.{MaxFileSizeExceedException, MaxPartitionExceedException} trait WatchDogSuiteBase extends KyuubiSparkSQLExtensionTest { override protected def beforeAll(): Unit = { @@ -371,7 +376,7 @@ trait WatchDogSuiteBase extends KyuubiSparkSQLExtensionTest { |ORDER BY a |DESC |""".stripMargin) - .collect().head.get(0).equals(10)) + .collect().head.get(0) === 10) } } } @@ -477,4 +482,120 @@ trait 
WatchDogSuiteBase extends KyuubiSparkSQLExtensionTest { } } } + + private def checkMaxFileSize(tableSize: Long, nonPartTableSize: Long): Unit = { + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> tableSize.toString) { + checkAnswer(sql("SELECT count(distinct(p)) FROM test"), Row(10) :: Nil) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> (tableSize / 2).toString) { + sql("SELECT * FROM test where p=1").queryExecution.sparkPlan + + sql(s"SELECT * FROM test WHERE p in (${Range(0, 3).toList.mkString(",")})") + .queryExecution.sparkPlan + + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test where p != 1").queryExecution.sparkPlan) + + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test").queryExecution.sparkPlan) + + intercept[MaxFileSizeExceedException](sql( + s"SELECT * FROM test WHERE p in (${Range(0, 6).toList.mkString(",")})") + .queryExecution.sparkPlan) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> nonPartTableSize.toString) { + checkAnswer(sql("SELECT count(*) FROM test_non_part"), Row(10000) :: Nil) + } + + withSQLConf(KyuubiSQLConf.WATCHDOG_MAX_FILE_SIZE.key -> (nonPartTableSize - 1).toString) { + intercept[MaxFileSizeExceedException]( + sql("SELECT * FROM test_non_part").queryExecution.sparkPlan) + } + } + + test("watchdog with scan maxFileSize -- hive") { + Seq(false).foreach { convertMetastoreParquet => + withTable("test", "test_non_part", "temp") { + spark.range(10000).selectExpr("id as col") + .createOrReplaceTempView("temp") + + // partitioned table + sql( + s""" + |CREATE TABLE test(i int) + |PARTITIONED BY (p int) + |STORED AS parquet""".stripMargin) + for (part <- Range(0, 10)) { + sql( + s""" + |INSERT OVERWRITE TABLE test PARTITION (p='$part') + |select col from temp""".stripMargin) + } + + val tablePath = new File(spark.sessionState.catalog.externalCatalog + .getTable("default", "test").location) + val tableSize = FileUtils.listFiles(tablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(tableSize > 0) + + // non-partitioned table + sql( + s""" + |CREATE TABLE test_non_part(i int) + |STORED AS parquet""".stripMargin) + sql( + s""" + |INSERT OVERWRITE TABLE test_non_part + |select col from temp""".stripMargin) + sql("ANALYZE TABLE test_non_part COMPUTE STATISTICS") + + val nonPartTablePath = new File(spark.sessionState.catalog.externalCatalog + .getTable("default", "test_non_part").location) + val nonPartTableSize = FileUtils.listFiles(nonPartTablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(nonPartTableSize > 0) + + // check + withSQLConf("spark.sql.hive.convertMetastoreParquet" -> convertMetastoreParquet.toString) { + checkMaxFileSize(tableSize, nonPartTableSize) + } + } + } + } + + test("watchdog with scan maxFileSize -- data source") { + withTempDir { dir => + withTempView("test", "test_non_part") { + // partitioned table + val tablePath = new File(dir, "test") + spark.range(10).selectExpr("id", "id as p") + .write + .partitionBy("p") + .mode("overwrite") + .parquet(tablePath.getCanonicalPath) + spark.read.load(tablePath.getCanonicalPath).createOrReplaceTempView("test") + + val tableSize = FileUtils.listFiles(tablePath, Array("parquet"), true).asScala + .map(_.length()).sum + assert(tableSize > 0) + + // non-partitioned table + val nonPartTablePath = new File(dir, "test_non_part") + spark.range(10000).selectExpr("id", "id as p") + .write + .mode("overwrite") + .parquet(nonPartTablePath.getCanonicalPath) + 
spark.read.load(nonPartTablePath.getCanonicalPath).createOrReplaceTempView("test_non_part")
+
+        val nonPartTableSize = FileUtils.listFiles(nonPartTablePath, Array("parquet"), true).asScala
+          .map(_.length()).sum
+        assert(nonPartTableSize > 0)
+
+        // check
+        checkMaxFileSize(tableSize, nonPartTableSize)
+      }
+    }
+  }
 }
diff --git a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala
index b24533e6926..e0f86f85d84 100644
--- a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala
+++ b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/ZorderSuiteBase.scala
@@ -18,9 +18,11 @@ package org.apache.spark.sql
 import org.apache.spark.SparkConf
-import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{Alias, Ascending, AttributeReference, Expression, ExpressionEvalHelper, Literal, NullsLast, SortOrder}
-import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation, Project, Sort}
+import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
+import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Ascending, AttributeReference, EqualTo, Expression, ExpressionEvalHelper, Literal, NullsLast, SortOrder}
+import org.apache.spark.sql.catalyst.parser.{ParseException, ParserInterface}
+import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, OneRowRelation, Project, Sort}
 import org.apache.spark.sql.execution.command.CreateDataSourceTableAsSelectCommand
 import org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand
 import org.apache.spark.sql.functions._
@@ -29,7 +31,7 @@ import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
 import org.apache.spark.sql.types._

 import org.apache.kyuubi.sql.{KyuubiSQLConf, KyuubiSQLExtensionException}
-import org.apache.kyuubi.sql.zorder.{OptimizeZorderCommandBase, Zorder, ZorderBytesUtils}
+import org.apache.kyuubi.sql.zorder.{OptimizeZorderCommandBase, OptimizeZorderStatement, Zorder, ZorderBytesUtils}

 trait ZorderSuiteBase extends KyuubiSparkSQLExtensionTest with ExpressionEvalHelper {
   override def sparkConf(): SparkConf = {
@@ -245,20 +247,22 @@ trait ZorderSuiteBase extends KyuubiSparkSQLExtensionTest with ExpressionEvalHel
       resHasSort: Boolean): Unit = {
     def checkSort(plan: LogicalPlan): Unit = {
       assert(plan.isInstanceOf[Sort] === resHasSort)
-      if (plan.isInstanceOf[Sort]) {
-        val colArr = cols.split(",")
-        val refs =
-          if (colArr.length == 1) {
-            plan.asInstanceOf[Sort].order.head
-              .child.asInstanceOf[AttributeReference] :: Nil
-          } else {
-            plan.asInstanceOf[Sort].order.head
-              .child.asInstanceOf[Zorder].children.map(_.references.head)
+      plan match {
+        case sort: Sort =>
+          val colArr = cols.split(",")
+          val refs =
+            if (colArr.length == 1) {
+              sort.order.head
+                .child.asInstanceOf[AttributeReference] :: Nil
+            } else {
+              sort.order.head
+                .child.asInstanceOf[Zorder].children.map(_.references.head)
+            }
+          assert(refs.size === colArr.size)
+          refs.zip(colArr).foreach { case (ref, col) =>
+            assert(ref.name === col.trim)
           }
-        assert(refs.size === colArr.size)
-        refs.zip(colArr).foreach { case (ref, col) =>
-          assert(ref.name === col.trim)
-        }
+        case _ =>
       }
     }
@@ -652,6 +656,99 @@ trait ZorderSuiteBase extends 
KyuubiSparkSQLExtensionTest with ExpressionEvalHel ZorderBytesUtils.interleaveBitsDefault(inputs.map(ZorderBytesUtils.toByteArray).toArray))) } } + + test("OPTIMIZE command is parsed as expected") { + val parser = createParser + val globalSort = spark.conf.get(KyuubiSQLConf.ZORDER_GLOBAL_SORT_ENABLED) + + assert(parser.parsePlan("OPTIMIZE p zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("p")))))) + + assert(parser.parsePlan("OPTIMIZE p zorder by c1, c2") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder( + Zorder(Seq(UnresolvedAttribute("c1"), UnresolvedAttribute("c2"))), + Ascending, + NullsLast, + Seq.empty) :: Nil, + globalSort, + Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("p")))))) + + assert(parser.parsePlan("OPTIMIZE p where id = 1 zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo(UnresolvedAttribute("id"), Literal(1)), + UnresolvedRelation(TableIdentifier("p"))))))) + + assert(parser.parsePlan("OPTIMIZE p where id = 1 zorder by c1, c2") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder( + Zorder(Seq(UnresolvedAttribute("c1"), UnresolvedAttribute("c2"))), + Ascending, + NullsLast, + Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo(UnresolvedAttribute("id"), Literal(1)), + UnresolvedRelation(TableIdentifier("p"))))))) + + assert(parser.parsePlan("OPTIMIZE p where id = current_date() zorder by c1") === + OptimizeZorderStatement( + Seq("p"), + Sort( + SortOrder(UnresolvedAttribute("c1"), Ascending, NullsLast, Seq.empty) :: Nil, + globalSort, + Project( + Seq(UnresolvedStar(None)), + Filter( + EqualTo( + UnresolvedAttribute("id"), + UnresolvedFunction("current_date", Seq.empty, false)), + UnresolvedRelation(TableIdentifier("p"))))))) + + // TODO: add following case support + intercept[ParseException] { + parser.parsePlan("OPTIMIZE p zorder by (c1)") + } + + intercept[ParseException] { + parser.parsePlan("OPTIMIZE p zorder by (c1, c2)") + } + } + + test("OPTIMIZE partition predicates constraint") { + withTable("p") { + sql("CREATE TABLE p (c1 INT, c2 INT) PARTITIONED BY (event_date DATE)") + val e1 = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE p WHERE event_date = current_date as c ZORDER BY c1, c2") + } + assert(e1.getMessage.contains("unsupported partition predicates")) + + val e2 = intercept[KyuubiSQLExtensionException] { + sql("OPTIMIZE p WHERE c1 = 1 ZORDER BY c1, c2") + } + assert(e2.getMessage == "Only partition column filters are allowed") + } + } + + def createParser: ParserInterface } trait ZorderWithCodegenEnabledSuiteBase extends ZorderSuiteBase { diff --git a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala index c8c1b021d5a..b891a7224a0 100644 --- a/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala +++ b/extensions/spark/kyuubi-extension-spark-common/src/test/scala/org/apache/spark/sql/benchmark/KyuubiBenchmarkBase.scala @@ -22,6 +22,7 @@ import java.io.{File, 
FileOutputStream, OutputStream} import scala.collection.JavaConverters._ import com.google.common.reflect.ClassPath +import org.scalatest.Assertions._ trait KyuubiBenchmarkBase { var output: Option[OutputStream] = None diff --git a/extensions/spark/kyuubi-extension-spark-jdbc-dialect/pom.xml b/extensions/spark/kyuubi-extension-spark-jdbc-dialect/pom.xml index 48c4c437923..ea571644e1d 100644 --- a/extensions/spark/kyuubi-extension-spark-jdbc-dialect/pom.xml +++ b/extensions/spark/kyuubi-extension-spark-jdbc-dialect/pom.xml @@ -21,12 +21,12 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-extension-spark-jdbc-dialect_2.12 + kyuubi-extension-spark-jdbc-dialect_${scala.binary.version} jar Kyuubi Spark JDBC Dialect plugin https://kyuubi.apache.org/ diff --git a/extensions/spark/kyuubi-spark-authz/README.md b/extensions/spark/kyuubi-spark-authz/README.md index 554797ee01d..374f83b0379 100644 --- a/extensions/spark/kyuubi-spark-authz/README.md +++ b/extensions/spark/kyuubi-spark-authz/README.md @@ -26,7 +26,7 @@ ## Build ```shell -build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dranger.version=2.3.0 +build/mvn clean package -DskipTests -pl :kyuubi-spark-authz_2.12 -am -Dspark.version=3.2.1 -Dranger.version=2.4.0 ``` ### Supported Apache Spark Versions @@ -34,7 +34,8 @@ build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dran `-Dspark.version=` - [x] master -- [x] 3.3.x (default) +- [x] 3.4.x (default) +- [x] 3.3.x - [x] 3.2.x - [x] 3.1.x - [x] 3.0.x @@ -44,7 +45,8 @@ build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dran `-Dranger.version=` -- [x] 2.3.x (default) +- [x] 2.4.x (default) +- [x] 2.3.x - [x] 2.2.x - [x] 2.1.x - [x] 2.0.x @@ -52,5 +54,5 @@ build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dran - [x] 1.1.x - [x] 1.0.x - [x] 0.7.x -- [x] 0.6.x +- [ ] 0.6.x diff --git a/extensions/spark/kyuubi-spark-authz/pom.xml b/extensions/spark/kyuubi-spark-authz/pom.xml index 8df1b9465a9..1ae63fcb34f 100644 --- a/extensions/spark/kyuubi-spark-authz/pom.xml +++ b/extensions/spark/kyuubi-spark-authz/pom.xml @@ -21,12 +21,12 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-spark-authz_2.12 + kyuubi-spark-authz_${scala.binary.version} jar Kyuubi Dev Spark Authorization Extension https://kyuubi.apache.org/ @@ -39,6 +39,11 @@ + + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + org.apache.ranger ranger-plugins-common @@ -321,7 +326,6 @@ - ${project.basedir}/src/test/resources @@ -331,4 +335,31 @@ target/scala-${scala.binary.version}/test-classes + + + gen-policy + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-test-source + + add-test-source + + generate-sources + + + src/test/gen/scala + + + + + + + + + diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor index 4686bb033cf..2facb004a04 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor @@ -17,4 +17,5 @@ 
org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionExtractor org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionExtractor +org.apache.kyuubi.plugin.spark.authz.serde.QualifiedNameStringFunctionExtractor org.apache.kyuubi.plugin.spark.authz.serde.StringFunctionExtractor diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor index 475f47afc24..3bb0ee6c23e 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor @@ -17,4 +17,5 @@ org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionTypeExtractor org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionTypeExtractor +org.apache.kyuubi.plugin.spark.authz.serde.FunctionNameFunctionTypeExtractor org.apache.kyuubi.plugin.spark.authz.serde.TempMarkerFunctionTypeExtractor diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor index f4d7eb503bd..78f836c65cd 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor @@ -18,8 +18,12 @@ org.apache.kyuubi.plugin.spark.authz.serde.CatalogTableOptionTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.CatalogTableTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.DataSourceV2RelationTableExtractor +org.apache.kyuubi.plugin.spark.authz.serde.ExpressionSeqTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.IdentifierTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.LogicalRelationTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.ResolvedDbObjectNameTableExtractor +org.apache.kyuubi.plugin.spark.authz.serde.ResolvedIdentifierTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.ResolvedTableTableExtractor +org.apache.kyuubi.plugin.spark.authz.serde.StringTableExtractor org.apache.kyuubi.plugin.spark.authz.serde.TableIdentifierTableExtractor +org.apache.kyuubi.plugin.spark.authz.serde.TableTableExtractor diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/database_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/database_command_spec.json index 4eb4b3ef8c9..c640ed89bce 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/database_command_spec.json +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/database_command_spec.json @@ -22,6 +22,11 @@ "fieldExtractor" : "CatalogPluginCatalogExtractor" }, "isInput" : false + }, { + "fieldName" : "name", + "fieldExtractor" : "ResolvedNamespaceDatabaseExtractor", + "catalogDesc" : null, + "isInput" : false } ], "opType" : "CREATEDATABASE" }, { @@ -45,6 +50,11 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.SetCatalogAndNamespace", "databaseDescs" : [ { + "fieldName" : "child", + "fieldExtractor" 
: "ResolvedNamespaceDatabaseExtractor", + "catalogDesc" : null, + "isInput" : true + }, { "fieldName" : "child", "fieldExtractor" : "ResolvedDBObjectNameDatabaseExtractor", "catalogDesc" : null, diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/function_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/function_command_spec.json index c9398561423..0b71245d218 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/function_command_spec.json +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/function_command_spec.json @@ -1,6 +1,16 @@ [ { "classname" : "org.apache.spark.sql.execution.command.CreateFunctionCommand", "functionDescs" : [ { + "fieldName" : "identifier", + "fieldExtractor" : "FunctionIdentifierFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "isTemp", + "fieldExtractor" : "TempMarkerFunctionTypeExtractor", + "skipTypes" : [ "TEMP" ] + }, + "isInput" : false + }, { "fieldName" : "functionName", "fieldExtractor" : "StringFunctionExtractor", "databaseDesc" : { @@ -44,6 +54,16 @@ }, { "classname" : "org.apache.spark.sql.execution.command.DropFunctionCommand", "functionDescs" : [ { + "fieldName" : "identifier", + "fieldExtractor" : "FunctionIdentifierFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "isTemp", + "fieldExtractor" : "TempMarkerFunctionTypeExtractor", + "skipTypes" : [ "TEMP" ] + }, + "isInput" : false + }, { "fieldName" : "functionName", "fieldExtractor" : "StringFunctionExtractor", "databaseDesc" : { diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json index 9a6aef4ed98..3273ccbeaf0 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json @@ -4,26 +4,86 @@ "fieldName" : "catalogTable", "fieldExtractor" : "CatalogTableTableExtractor", "catalogDesc" : null - } ] + } ], + "functionDescs" : [ ] }, { "classname" : "org.apache.spark.sql.catalyst.catalog.HiveTableRelation", "scanDescs" : [ { "fieldName" : "tableMeta", "fieldExtractor" : "CatalogTableTableExtractor", "catalogDesc" : null - } ] + } ], + "functionDescs" : [ ] }, { "classname" : "org.apache.spark.sql.execution.datasources.LogicalRelation", "scanDescs" : [ { "fieldName" : "catalogTable", "fieldExtractor" : "CatalogTableOptionTableExtractor", "catalogDesc" : null - } ] + } ], + "functionDescs" : [ ] }, { "classname" : "org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation", "scanDescs" : [ { "fieldName" : null, "fieldExtractor" : "DataSourceV2RelationTableExtractor", "catalogDesc" : null + } ], + "functionDescs" : [ ] +}, { + "classname" : "org.apache.spark.sql.hive.HiveGenericUDF", + "scanDescs" : [ ], + "functionDescs" : [ { + "fieldName" : "name", + "fieldExtractor" : "QualifiedNameStringFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "name", + "fieldExtractor" : "FunctionNameFunctionTypeExtractor", + "skipTypes" : [ "TEMP", "SYSTEM" ] + }, + "isInput" : true + } ] +}, { + "classname" : "org.apache.spark.sql.hive.HiveGenericUDTF", + "scanDescs" : [ ], + "functionDescs" : [ { + "fieldName" : "name", + "fieldExtractor" : "QualifiedNameStringFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "name", + "fieldExtractor" : "FunctionNameFunctionTypeExtractor", + 
"skipTypes" : [ "TEMP", "SYSTEM" ] + }, + "isInput" : true + } ] +}, { + "classname" : "org.apache.spark.sql.hive.HiveSimpleUDF", + "scanDescs" : [ ], + "functionDescs" : [ { + "fieldName" : "name", + "fieldExtractor" : "QualifiedNameStringFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "name", + "fieldExtractor" : "FunctionNameFunctionTypeExtractor", + "skipTypes" : [ "TEMP", "SYSTEM" ] + }, + "isInput" : true + } ] +}, { + "classname" : "org.apache.spark.sql.hive.HiveUDAFFunction", + "scanDescs" : [ ], + "functionDescs" : [ { + "fieldName" : "name", + "fieldExtractor" : "QualifiedNameStringFunctionExtractor", + "databaseDesc" : null, + "functionTypeDesc" : { + "fieldName" : "name", + "fieldExtractor" : "FunctionNameFunctionTypeExtractor", + "skipTypes" : [ "TEMP", "SYSTEM" ] + }, + "isInput" : true } ] } ] \ No newline at end of file diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json index f1c2297b38e..3e191146862 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json +++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json @@ -91,6 +91,20 @@ "fieldName" : "plan", "fieldExtractor" : "LogicalPlanQueryExtractor" } ] +}, { + "classname" : "org.apache.spark.sql.catalyst.plans.logical.Call", + "tableDescs" : [ { + "fieldName" : "args", + "fieldExtractor" : "ExpressionSeqTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + } ], + "opType" : "ALTERTABLE_PROPERTIES", + "queryDescs" : [ ] }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.CommentOnTable", "tableDescs" : [ { @@ -108,6 +122,15 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.CreateTable", "tableDescs" : [ { + "fieldName" : "child", + "fieldExtractor" : "ResolvedIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableName", "fieldExtractor" : "IdentifierTableExtractor", "columnDesc" : null, @@ -134,6 +157,15 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.CreateTableAsSelect", "tableDescs" : [ { + "fieldName" : "left", + "fieldExtractor" : "ResolvedIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableName", "fieldExtractor" : "IdentifierTableExtractor", "columnDesc" : null, @@ -264,6 +296,15 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.DropTable", "tableDescs" : [ { + "fieldName" : "child", + "fieldExtractor" : "ResolvedIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "child", "fieldExtractor" : "ResolvedTableTableExtractor", "columnDesc" : null, @@ -432,6 +473,15 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.ReplaceTable", "tableDescs" : [ { + "fieldName" : "child", + "fieldExtractor" : "ResolvedIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + 
"isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableName", "fieldExtractor" : "IdentifierTableExtractor", "columnDesc" : null, @@ -458,6 +508,15 @@ }, { "classname" : "org.apache.spark.sql.catalyst.plans.logical.ReplaceTableAsSelect", "tableDescs" : [ { + "fieldName" : "left", + "fieldExtractor" : "ResolvedIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableName", "fieldExtractor" : "IdentifierTableExtractor", "columnDesc" : null, @@ -806,6 +865,15 @@ }, { "classname" : "org.apache.spark.sql.execution.command.AnalyzeColumnCommand", "tableDescs" : [ { + "fieldName" : "tableIdent", + "fieldExtractor" : "TableIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableIdent", "fieldExtractor" : "TableIdentifierTableExtractor", "columnDesc" : { @@ -830,11 +898,20 @@ "isInput" : true, "setCurrentDatabaseIfMissing" : false } ], - "opType" : "ANALYZE_TABLE", + "opType" : "ALTERTABLE_PROPERTIES", "queryDescs" : [ ] }, { "classname" : "org.apache.spark.sql.execution.command.AnalyzePartitionCommand", "tableDescs" : [ { + "fieldName" : "tableIdent", + "fieldExtractor" : "TableIdentifierTableExtractor", + "columnDesc" : null, + "actionTypeDesc" : null, + "tableTypeDesc" : null, + "catalogDesc" : null, + "isInput" : false, + "setCurrentDatabaseIfMissing" : false + }, { "fieldName" : "tableIdent", "fieldExtractor" : "TableIdentifierTableExtractor", "columnDesc" : { @@ -847,7 +924,7 @@ "isInput" : true, "setCurrentDatabaseIfMissing" : false } ], - "opType" : "ANALYZE_TABLE", + "opType" : "ALTERTABLE_PROPERTIES", "queryDescs" : [ ] }, { "classname" : "org.apache.spark.sql.execution.command.AnalyzeTableCommand", @@ -858,14 +935,9 @@ "actionTypeDesc" : null, "tableTypeDesc" : null, "catalogDesc" : null, - "isInput" : true, + "isInput" : false, "setCurrentDatabaseIfMissing" : false - } ], - "opType" : "ANALYZE_TABLE", - "queryDescs" : [ ] -}, { - "classname" : "org.apache.spark.sql.execution.command.AnalyzeTablesCommand", - "tableDescs" : [ { + }, { "fieldName" : "tableIdent", "fieldExtractor" : "TableIdentifierTableExtractor", "columnDesc" : null, @@ -875,7 +947,7 @@ "isInput" : true, "setCurrentDatabaseIfMissing" : false } ], - "opType" : "ANALYZE_TABLE", + "opType" : "ALTERTABLE_PROPERTIES", "queryDescs" : [ ] }, { "classname" : "org.apache.spark.sql.execution.command.CacheTableCommand", @@ -1243,14 +1315,6 @@ "fieldName" : "query", "fieldExtractor" : "LogicalPlanQueryExtractor" } ] -}, { - "classname" : "org.apache.spark.sql.execution.datasources.InsertIntoHiveDirCommand", - "tableDescs" : [ ], - "opType" : "QUERY", - "queryDescs" : [ { - "fieldName" : "query", - "fieldExtractor" : "LogicalPlanQueryExtractor" - } ] }, { "classname" : "org.apache.spark.sql.execution.datasources.RefreshTable", "tableDescs" : [ { @@ -1293,6 +1357,14 @@ "fieldName" : "query", "fieldExtractor" : "LogicalPlanQueryExtractor" } ] +}, { + "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand", + "tableDescs" : [ ], + "opType" : "QUERY", + "queryDescs" : [ { + "fieldName" : "query", + "fieldExtractor" : "LogicalPlanQueryExtractor" + } ] }, { "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveTable", "tableDescs" : [ { diff --git 
a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala index 51f5694e1e5..5c496b8744b 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala @@ -28,6 +28,7 @@ import org.apache.kyuubi.plugin.spark.authz.OperationType.OperationType import org.apache.kyuubi.plugin.spark.authz.PrivilegeObjectActionType._ import org.apache.kyuubi.plugin.spark.authz.serde._ import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ object PrivilegesBuilder { @@ -95,6 +96,12 @@ object PrivilegesBuilder { val cols = conditionList ++ sortCols buildQuery(s.child, privilegeObjects, projectionList, cols, spark) + case a: Aggregate => + val aggCols = + (a.aggregateExpressions ++ a.groupingExpressions).flatMap(e => collectLeaves(e)) + val cols = conditionList ++ aggCols + buildQuery(a.child, privilegeObjects, projectionList, cols, spark) + case scan if isKnownScan(scan) && scan.resolved => getScanSpec(scan).tables(scan, spark).foreach(mergeProjection(_, scan)) @@ -202,7 +209,39 @@ object PrivilegesBuilder { } } - type PrivilegesAndOpType = (Seq[PrivilegeObject], Seq[PrivilegeObject], OperationType) + type PrivilegesAndOpType = (Iterable[PrivilegeObject], Iterable[PrivilegeObject], OperationType) + + /** + * Build input privilege objects from a Spark's LogicalPlan for hive permanent udf + * + * @param plan A Spark LogicalPlan + */ + def buildFunctions( + plan: LogicalPlan, + spark: SparkSession): PrivilegesAndOpType = { + val inputObjs = new ArrayBuffer[PrivilegeObject] + plan match { + case command: Command if isKnownTableCommand(command) => + val spec = getTableCommandSpec(command) + val functionPrivAndOpType = spec.queries(plan) + .map(plan => buildFunctions(plan, spark)) + functionPrivAndOpType.map(_._1) + .reduce(_ ++ _) + .foreach(functionPriv => inputObjs += functionPriv) + + case plan => plan transformAllExpressions { + case hiveFunction: Expression if isKnownFunction(hiveFunction) => + val functionSpec: ScanSpec = getFunctionSpec(hiveFunction) + if (functionSpec.functionDescs + .exists(!_.functionTypeDesc.get.skip(hiveFunction, spark))) { + functionSpec.functions(hiveFunction).foreach(func => + inputObjs += PrivilegeObject(func)) + } + hiveFunction + } + } + (inputObjs, Seq.empty, OperationType.QUERY) + } /** * Build input and output privilege objects from a Spark's LogicalPlan diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessRequest.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessRequest.scala index 4997dda3b87..8fc8028e683 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessRequest.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessRequest.scala @@ -27,7 +27,7 @@ import org.apache.ranger.plugin.policyengine.{RangerAccessRequestImpl, RangerPol import org.apache.kyuubi.plugin.spark.authz.OperationType.OperationType import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType._ -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils.{invoke, invokeAs} 
+import org.apache.kyuubi.util.reflect.ReflectUtils._ case class AccessRequest private (accessType: AccessType) extends RangerAccessRequestImpl @@ -50,7 +50,7 @@ object AccessRequest { "getRolesFromUserAndGroups", (classOf[String], userName), (classOf[JSet[String]], userGroups)) - invoke(req, "setUserRoles", (classOf[JSet[String]], roles)) + invokeAs[Unit](req, "setUserRoles", (classOf[JSet[String]], roles)) } catch { case _: Exception => } @@ -61,7 +61,7 @@ object AccessRequest { } try { val clusterName = invokeAs[String](SparkRangerAdminPlugin, "getClusterName") - invoke(req, "setClusterName", (classOf[String], clusterName)) + invokeAs[Unit](req, "setClusterName", (classOf[String], clusterName)) } catch { case _: Exception => } @@ -74,8 +74,8 @@ object AccessRequest { private def getUserGroupsFromUserStore(user: UserGroupInformation): Option[JSet[String]] = { try { - val storeEnricher = invoke(SparkRangerAdminPlugin, "getUserStoreEnricher") - val userStore = invoke(storeEnricher, "getRangerUserStore") + val storeEnricher = invokeAs[AnyRef](SparkRangerAdminPlugin, "getUserStoreEnricher") + val userStore = invokeAs[AnyRef](storeEnricher, "getRangerUserStore") val userGroupMapping = invokeAs[JHashMap[String, JSet[String]]](userStore, "getUserGroupMapping") Some(userGroupMapping.get(user.getShortUserName)) diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessType.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessType.scala index 7d62229ee41..c0b7d2a03ef 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessType.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/AccessType.scala @@ -58,7 +58,12 @@ object AccessType extends Enumeration { SHOWPARTITIONS | ANALYZE_TABLE => SELECT case SHOWCOLUMNS | DESCTABLE => SELECT - case SHOWDATABASES | SWITCHDATABASE | DESCDATABASE | SHOWTABLES | SHOWFUNCTIONS => USE + case SHOWDATABASES | + SWITCHDATABASE | + DESCDATABASE | + SHOWTABLES | + SHOWFUNCTIONS | + DESCFUNCTION => USE case TRUNCATETABLE => UPDATE case _ => NONE } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilterDataSourceV2Strategy.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilterDataSourceV2Strategy.scala index 1109464ac0a..cbf79581ed6 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilterDataSourceV2Strategy.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilterDataSourceV2Strategy.scala @@ -17,18 +17,28 @@ package org.apache.kyuubi.plugin.spark.authz.ranger import org.apache.spark.sql.{SparkSession, Strategy} -import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project} import org.apache.spark.sql.execution.SparkPlan import org.apache.kyuubi.plugin.spark.authz.util.ObjectFilterPlaceHolder class FilterDataSourceV2Strategy(spark: SparkSession) extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { + // For Spark 3.1 and below, `ColumnPruning` rule will set `ObjectFilterPlaceHolder#child` to + // `Project` + case ObjectFilterPlaceHolder(Project(_, child)) if child.nodeName == "ShowNamespaces" => + 
spark.sessionState.planner.plan(child) + .map(FilteredShowNamespaceExec(_, spark.sparkContext)).toSeq + + // For Spark 3.2 and above case ObjectFilterPlaceHolder(child) if child.nodeName == "ShowNamespaces" => - spark.sessionState.planner.plan(child).map(FilteredShowNamespaceExec).toSeq + spark.sessionState.planner.plan(child) + .map(FilteredShowNamespaceExec(_, spark.sparkContext)).toSeq case ObjectFilterPlaceHolder(child) if child.nodeName == "ShowTables" => - spark.sessionState.planner.plan(child).map(FilteredShowTablesExec).toSeq + spark.sessionState.planner.plan(child) + .map(FilteredShowTablesExec(_, spark.sparkContext)).toSeq + case _ => Nil } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilteredShowObjectsExec.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilteredShowObjectsExec.scala index 7cc777d9b89..67519118ecc 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilteredShowObjectsExec.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/FilteredShowObjectsExec.scala @@ -17,6 +17,7 @@ package org.apache.kyuubi.plugin.spark.authz.ranger import org.apache.hadoop.security.UserGroupInformation +import org.apache.spark.SparkContext import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.Attribute @@ -26,24 +27,29 @@ import org.apache.kyuubi.plugin.spark.authz.{ObjectType, OperationType} import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils trait FilteredShowObjectsExec extends LeafExecNode { - def delegated: SparkPlan + def result: Array[InternalRow] - final override def output: Seq[Attribute] = delegated.output - - final private lazy val result = { - delegated.executeCollect().filter(isAllowed(_, AuthZUtils.getAuthzUgi(sparkContext))) - } + override def output: Seq[Attribute] final override def doExecute(): RDD[InternalRow] = { sparkContext.parallelize(result, 1) } +} - protected def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean +trait FilteredShowObjectsCheck { + def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean } -case class FilteredShowNamespaceExec(delegated: SparkPlan) extends FilteredShowObjectsExec { +case class FilteredShowNamespaceExec(result: Array[InternalRow], output: Seq[Attribute]) + extends FilteredShowObjectsExec {} +object FilteredShowNamespaceExec extends FilteredShowObjectsCheck { + def apply(delegated: SparkPlan, sc: SparkContext): FilteredShowNamespaceExec = { + val result = delegated.executeCollect() + .filter(isAllowed(_, AuthZUtils.getAuthzUgi(sc))) + new FilteredShowNamespaceExec(result, delegated.output) + } - override protected def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean = { + override def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean = { val database = r.getString(0) val resource = AccessResource(ObjectType.DATABASE, database, null, null) val request = AccessRequest(resource, ugi, OperationType.SHOWDATABASES, AccessType.USE) @@ -52,8 +58,16 @@ case class FilteredShowNamespaceExec(delegated: SparkPlan) extends FilteredShowO } } -case class FilteredShowTablesExec(delegated: SparkPlan) extends FilteredShowObjectsExec { - override protected def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean = { +case class FilteredShowTablesExec(result: Array[InternalRow], output: 
Seq[Attribute]) + extends FilteredShowObjectsExec {} +object FilteredShowTablesExec extends FilteredShowObjectsCheck { + def apply(delegated: SparkPlan, sc: SparkContext): FilteredShowTablesExec = { + val result = delegated.executeCollect() + .filter(isAllowed(_, AuthZUtils.getAuthzUgi(sc))) + new FilteredShowTablesExec(result, delegated.output) + } + + override def isAllowed(r: InternalRow, ugi: UserGroupInformation): Boolean = { val database = r.getString(0) val table = r.getString(1) val isTemp = r.getBoolean(2) diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleAuthorization.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleAuthorization.scala index 1c73acc492e..3203108dfae 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleAuthorization.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleAuthorization.scala @@ -27,16 +27,15 @@ import org.apache.spark.sql.catalyst.trees.TreeNodeTag import org.apache.kyuubi.plugin.spark.authz._ import org.apache.kyuubi.plugin.spark.authz.ObjectType._ -import org.apache.kyuubi.plugin.spark.authz.ranger.RuleAuthorization.KYUUBI_AUTHZ_TAG +import org.apache.kyuubi.plugin.spark.authz.ranger.RuleAuthorization._ import org.apache.kyuubi.plugin.spark.authz.ranger.SparkRangerAdminPlugin._ -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._; +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ class RuleAuthorization(spark: SparkSession) extends Rule[LogicalPlan] { - override def apply(plan: LogicalPlan): LogicalPlan = plan match { - case p if !plan.getTagValue(KYUUBI_AUTHZ_TAG).contains(true) => - RuleAuthorization.checkPrivileges(spark, p) - p.setTagValue(KYUUBI_AUTHZ_TAG, true) - p - case p => p // do nothing if checked privileges already. + override def apply(plan: LogicalPlan): LogicalPlan = { + plan match { + case plan if isAuthChecked(plan) => plan // do nothing if checked privileges already.
+ case p => checkPrivileges(spark, p) + } } } @@ -44,7 +43,7 @@ object RuleAuthorization { val KYUUBI_AUTHZ_TAG = TreeNodeTag[Boolean]("__KYUUBI_AUTHZ_TAG") - def checkPrivileges(spark: SparkSession, plan: LogicalPlan): Unit = { + private def checkPrivileges(spark: SparkSession, plan: LogicalPlan): LogicalPlan = { val auditHandler = new SparkRangerAuditHandler val ugi = getAuthzUgi(spark.sparkContext) val (inputs, outputs, opType) = PrivilegesBuilder.build(plan, spark) @@ -54,7 +53,7 @@ object RuleAuthorization { requests += AccessRequest(resource, ugi, opType, AccessType.USE) } - def addAccessRequest(objects: Seq[PrivilegeObject], isInput: Boolean): Unit = { + def addAccessRequest(objects: Iterable[PrivilegeObject], isInput: Boolean): Unit = { objects.foreach { obj => val resource = AccessResource(obj, opType) val accessType = ranger.AccessType(obj, opType, isInput) @@ -85,7 +84,7 @@ object RuleAuthorization { } case _ => Seq(request) } - } + }.toSeq if (authorizeInSingleCall) { verify(requestArrays.flatten, auditHandler) @@ -94,5 +93,17 @@ object RuleAuthorization { verify(Seq(req), auditHandler) } } + markAuthChecked(plan) + } + + private def markAuthChecked(plan: LogicalPlan): LogicalPlan = { + plan.transformUp { case p => + p.setTagValue(KYUUBI_AUTHZ_TAG, true) + p + } + } + + private def isAuthChecked(plan: LogicalPlan): Boolean = { + plan.find(_.getTagValue(KYUUBI_AUTHZ_TAG).contains(true)).nonEmpty } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleReplaceShowObjectCommands.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleReplaceShowObjectCommands.scala index 08d2b4fd024..bf762109cba 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleReplaceShowObjectCommands.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleReplaceShowObjectCommands.scala @@ -26,15 +26,13 @@ import org.apache.spark.sql.execution.command.{RunnableCommand, ShowColumnsComma import org.apache.kyuubi.plugin.spark.authz.{ObjectType, OperationType} import org.apache.kyuubi.plugin.spark.authz.util.{AuthZUtils, ObjectFilterPlaceHolder, WithInternalChildren} +import org.apache.kyuubi.util.reflect.ReflectUtils._ class RuleReplaceShowObjectCommands extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan match { case r: RunnableCommand if r.nodeName == "ShowTablesCommand" => FilteredShowTablesCommand(r) case n: LogicalPlan if n.nodeName == "ShowTables" => ObjectFilterPlaceHolder(n) - // show databases in spark2.4.x - case r: RunnableCommand if r.nodeName == "ShowDatabasesCommand" => - FilteredShowDatabasesCommand(r) case n: LogicalPlan if n.nodeName == "ShowNamespaces" => ObjectFilterPlaceHolder(n) case r: RunnableCommand if r.nodeName == "ShowFunctionsCommand" => @@ -48,7 +46,7 @@ class RuleReplaceShowObjectCommands extends Rule[LogicalPlan] { case class FilteredShowTablesCommand(delegated: RunnableCommand) extends FilteredShowObjectCommand(delegated) { - var isExtended: Boolean = AuthZUtils.getFieldVal(delegated, "isExtended").asInstanceOf[Boolean] + private val isExtended = getField[Boolean](delegated, "isExtended") override protected def isAllowed(r: Row, ugi: UserGroupInformation): Boolean = { val database = r.getString(0) @@ -63,18 +61,6 @@ case class FilteredShowTablesCommand(delegated: RunnableCommand) } } -case class 
FilteredShowDatabasesCommand(delegated: RunnableCommand) - extends FilteredShowObjectCommand(delegated) { - - override protected def isAllowed(r: Row, ugi: UserGroupInformation): Boolean = { - val database = r.getString(0) - val resource = AccessResource(ObjectType.DATABASE, database, null, null) - val request = AccessRequest(resource, ugi, OperationType.SHOWDATABASES, AccessType.USE) - val result = SparkRangerAdminPlugin.isAccessAllowed(request) - result != null && result.getIsAllowed - } -} - abstract class FilteredShowObjectCommand(delegated: RunnableCommand) extends RunnableCommand with WithInternalChildren { diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala index 78e59ff897f..9abb9cd2805 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala @@ -79,7 +79,7 @@ object SparkRangerAdminPlugin extends RangerBasePlugin("spark", "sparkSql") () => { if (plugin != null) { LOG.info(s"clean up ranger plugin, appId: ${plugin.getAppId}") - this.cleanup() + plugin.cleanup() } }, Integer.MAX_VALUE) @@ -109,7 +109,7 @@ object SparkRangerAdminPlugin extends RangerBasePlugin("spark", "sparkSql") } else if (result.getMaskTypeDef != null) { result.getMaskTypeDef.getName match { case "MASK" => regexp_replace(col) - case "MASK_SHOW_FIRST_4" if isSparkVersionAtLeast("3.1") => + case "MASK_SHOW_FIRST_4" if isSparkV31OrGreater => regexp_replace(col, hasLen = true) case "MASK_SHOW_FIRST_4" => val right = regexp_replace(s"substr($col, 5)") @@ -136,7 +136,8 @@ object SparkRangerAdminPlugin extends RangerBasePlugin("spark", "sparkSql") val upper = s"regexp_replace($expr, '[A-Z]', 'X'$pos)" val lower = s"regexp_replace($upper, '[a-z]', 'x'$pos)" val digits = s"regexp_replace($lower, '[0-9]', 'n'$pos)" - digits + val other = s"regexp_replace($digits, '[^A-Za-z0-9]', 'U'$pos)" + other } /** diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala index e96ef8cbfd6..32ad30e211f 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.plugin.spark.authz.serde import com.fasterxml.jackson.annotation.JsonIgnore import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.slf4j.LoggerFactory @@ -94,7 +95,8 @@ case class TableCommandSpec( case class ScanSpec( classname: String, - scanDescs: Seq[ScanDesc]) extends CommandSpec { + scanDescs: Seq[ScanDesc], + functionDescs: Seq[FunctionDesc] = Seq.empty) extends CommandSpec { override def opType: String = OperationType.QUERY.toString def tables: (LogicalPlan, SparkSession) => Seq[Table] = (plan, spark) => { scanDescs.flatMap { td => @@ -107,4 +109,16 @@ case class ScanSpec( } } } + + def functions: (Expression) => Seq[Function] = (expr) => { 
+ functionDescs.flatMap { fd => + try { + Some(fd.extract(expr)) + } catch { + case e: Exception => + LOG.debug(fd.error(expr, e)) + None + } + } + } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Descriptor.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Descriptor.scala index d8c866b8875..fc660ce143e 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Descriptor.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Descriptor.scala @@ -23,18 +23,9 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.kyuubi.plugin.spark.authz.PrivilegeObjectActionType import org.apache.kyuubi.plugin.spark.authz.PrivilegeObjectActionType.PrivilegeObjectActionType -import org.apache.kyuubi.plugin.spark.authz.serde.ActionTypeExtractor.actionTypeExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.CatalogExtractor.catalogExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.ColumnExtractor.columnExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.DatabaseExtractor.dbExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.functionExtractors import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType.FunctionType -import org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor.functionTypeExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.QueryExtractor.queryExtractors -import org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor.tableExtractors import org.apache.kyuubi.plugin.spark.authz.serde.TableType.TableType -import org.apache.kyuubi.plugin.spark.authz.serde.TableTypeExtractor.tableTypeExtractors -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ /** * A database object(such as database, table, function) descriptor describes its name and getter @@ -81,8 +72,8 @@ case class ColumnDesc( fieldName: String, fieldExtractor: String) extends Descriptor { override def extract(v: AnyRef): Seq[String] = { - val columnsVal = invoke(v, fieldName) - val columnExtractor = columnExtractors(fieldExtractor) + val columnsVal = invokeAs[AnyRef](v, fieldName) + val columnExtractor = lookupExtractor[ColumnExtractor](fieldExtractor) columnExtractor(columnsVal) } } @@ -100,8 +91,8 @@ case class DatabaseDesc( catalogDesc: Option[CatalogDesc] = None, isInput: Boolean = false) extends Descriptor { override def extract(v: AnyRef): Database = { - val databaseVal = invoke(v, fieldName) - val databaseExtractor = dbExtractors(fieldExtractor) + val databaseVal = invokeAs[AnyRef](v, fieldName) + val databaseExtractor = lookupExtractor[DatabaseExtractor](fieldExtractor) val db = databaseExtractor(databaseVal) if (db.catalog.isEmpty && catalogDesc.nonEmpty) { val maybeCatalog = catalogDesc.get.extract(v) @@ -128,8 +119,8 @@ case class FunctionTypeDesc( } def extract(v: AnyRef, spark: SparkSession): FunctionType = { - val functionTypeVal = invoke(v, fieldName) - val functionTypeExtractor = functionTypeExtractors(fieldExtractor) + val functionTypeVal = invokeAs[AnyRef](v, fieldName) + val functionTypeExtractor = lookupExtractor[FunctionTypeExtractor](fieldExtractor) functionTypeExtractor(functionTypeVal, spark) } @@ -154,8 +145,8 @@ case class FunctionDesc( functionTypeDesc: Option[FunctionTypeDesc] = None, isInput: Boolean = false) extends Descriptor 
{ override def extract(v: AnyRef): Function = { - val functionVal = invoke(v, fieldName) - val functionExtractor = functionExtractors(fieldExtractor) + val functionVal = invokeAs[AnyRef](v, fieldName) + val functionExtractor = lookupExtractor[FunctionExtractor](fieldExtractor) var function = functionExtractor(functionVal) if (function.database.isEmpty) { val maybeDatabase = databaseDesc.map(_.extract(v)) @@ -179,8 +170,8 @@ case class QueryDesc( fieldName: String, fieldExtractor: String = "LogicalPlanQueryExtractor") extends Descriptor { override def extract(v: AnyRef): Option[LogicalPlan] = { - val queryVal = invoke(v, fieldName) - val queryExtractor = queryExtractors(fieldExtractor) + val queryVal = invokeAs[AnyRef](v, fieldName) + val queryExtractor = lookupExtractor[QueryExtractor](fieldExtractor) queryExtractor(queryVal) } } @@ -201,8 +192,8 @@ case class TableTypeDesc( } def extract(v: AnyRef, spark: SparkSession): TableType = { - val tableTypeVal = invoke(v, fieldName) - val tableTypeExtractor = tableTypeExtractors(fieldExtractor) + val tableTypeVal = invokeAs[AnyRef](v, fieldName) + val tableTypeExtractor = lookupExtractor[TableTypeExtractor](fieldExtractor) tableTypeExtractor(tableTypeVal, spark) } @@ -239,8 +230,8 @@ case class TableDesc( } def extract(v: AnyRef, spark: SparkSession): Option[Table] = { - val tableVal = invoke(v, fieldName) - val tableExtractor = tableExtractors(fieldExtractor) + val tableVal = invokeAs[AnyRef](v, fieldName) + val tableExtractor = lookupExtractor[TableExtractor](fieldExtractor) val maybeTable = tableExtractor(spark, tableVal) maybeTable.map { t => if (t.catalog.isEmpty && catalogDesc.nonEmpty) { @@ -266,9 +257,9 @@ case class ActionTypeDesc( actionType: Option[String] = None) extends Descriptor { override def extract(v: AnyRef): PrivilegeObjectActionType = { actionType.map(PrivilegeObjectActionType.withName).getOrElse { - val actionTypeVal = invoke(v, fieldName) - val extractor = actionTypeExtractors(fieldExtractor) - extractor(actionTypeVal) + val actionTypeVal = invokeAs[AnyRef](v, fieldName) + val actionTypeExtractor = lookupExtractor[ActionTypeExtractor](fieldExtractor) + actionTypeExtractor(actionTypeVal) } } } @@ -283,9 +274,9 @@ case class CatalogDesc( fieldName: String = "catalog", fieldExtractor: String = "CatalogPluginCatalogExtractor") extends Descriptor { override def extract(v: AnyRef): Option[String] = { - val catalogVal = invoke(v, fieldName) - val extractor = catalogExtractors(fieldExtractor) - extractor(catalogVal) + val catalogVal = invokeAs[AnyRef](v, fieldName) + val catalogExtractor = lookupExtractor[CatalogExtractor](fieldExtractor) + catalogExtractor(catalogVal) } } @@ -301,9 +292,9 @@ case class ScanDesc( val tableVal = if (fieldName == null) { v } else { - invoke(v, fieldName) + invokeAs[AnyRef](v, fieldName) } - val tableExtractor = tableExtractors(fieldExtractor) + val tableExtractor = lookupExtractor[TableExtractor](fieldExtractor) val maybeTable = tableExtractor(spark, tableVal) maybeTable.map { t => if (t.catalog.isEmpty && catalogDesc.nonEmpty) { diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Function.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Function.scala index b7a0010b4b5..ba19972ed5f 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Function.scala +++ 
b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/Function.scala @@ -21,8 +21,8 @@ package org.apache.kyuubi.plugin.spark.authz.serde * :: Developer API :: * * Represents a function identity - * + * @param catalog * @param database * @param functionName */ -case class Function(database: Option[String], functionName: String) +case class Function(catalog: Option[String], database: Option[String], functionName: String) diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/catalogExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/catalogExtractors.scala index 0b7d712230e..e48becb325f 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/catalogExtractors.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/catalogExtractors.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.plugin.spark.authz.serde -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ trait CatalogExtractor extends (AnyRef => Option[String]) with Extractor @@ -43,7 +43,7 @@ class CatalogPluginOptionCatalogExtractor extends CatalogExtractor { override def apply(v1: AnyRef): Option[String] = { v1 match { case Some(catalogPlugin: AnyRef) => - new CatalogPluginCatalogExtractor().apply(catalogPlugin) + lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogPlugin) case _ => None } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/databaseExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/databaseExtractors.scala index 4e9270e7838..713d3e3fb75 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/databaseExtractors.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/databaseExtractors.scala @@ -18,6 +18,7 @@ package org.apache.kyuubi.plugin.spark.authz.serde import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ trait DatabaseExtractor extends (AnyRef => Database) with Extractor @@ -68,9 +69,9 @@ class StringSeqOptionDatabaseExtractor extends DatabaseExtractor { */ class ResolvedNamespaceDatabaseExtractor extends DatabaseExtractor { override def apply(v1: AnyRef): Database = { - val catalogVal = invoke(v1, "catalog") - val catalog = new CatalogPluginCatalogExtractor().apply(catalogVal) - val namespace = getFieldVal[Seq[String]](v1, "namespace") + val catalogVal = invokeAs[AnyRef](v1, "catalog") + val catalog = lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogVal) + val namespace = getField[Seq[String]](v1, "namespace") Database(catalog, quote(namespace)) } } @@ -80,9 +81,9 @@ class ResolvedNamespaceDatabaseExtractor extends DatabaseExtractor { */ class ResolvedDBObjectNameDatabaseExtractor extends DatabaseExtractor { override def apply(v1: AnyRef): Database = { - val catalogVal = invoke(v1, "catalog") - val catalog = new CatalogPluginCatalogExtractor().apply(catalogVal) - val namespace = getFieldVal[Seq[String]](v1, "nameParts") + val catalogVal = invokeAs[AnyRef](v1, "catalog") + val catalog = lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogVal) + val namespace = getField[Seq[String]](v1, 
"nameParts") Database(catalog, quote(namespace)) } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala index 894a6cb8f2f..bcd5f266573 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala @@ -20,12 +20,26 @@ package org.apache.kyuubi.plugin.spark.authz.serde import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.expressions.ExpressionInfo +import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionFromQualifiedName + trait FunctionExtractor extends (AnyRef => Function) with Extractor object FunctionExtractor { val functionExtractors: Map[String, FunctionExtractor] = { loadExtractorsToMap[FunctionExtractor] } + + private[authz] def buildFunctionFromQualifiedName(qualifiedName: String): Function = { + val parts: Array[String] = qualifiedName.split("\\.") + val (catalog, database, functionName) = if (parts.length == 3) { + (Some(parts.head), Some(parts.tail.head), parts.last) + } else if (parts.length == 2) { + (None, Some(parts.head), parts.last) + } else { + (None, None, qualifiedName) + } + Function(catalog, database, functionName) + } } /** @@ -33,7 +47,17 @@ object FunctionExtractor { */ class StringFunctionExtractor extends FunctionExtractor { override def apply(v1: AnyRef): Function = { - Function(None, v1.asInstanceOf[String]) + Function(None, None, v1.asInstanceOf[String]) + } +} + +/** + * * String + */ +class QualifiedNameStringFunctionExtractor extends FunctionExtractor { + override def apply(v1: AnyRef): Function = { + val qualifiedName: String = v1.asInstanceOf[String] + buildFunctionFromQualifiedName(qualifiedName) } } @@ -43,7 +67,7 @@ class StringFunctionExtractor extends FunctionExtractor { class FunctionIdentifierFunctionExtractor extends FunctionExtractor { override def apply(v1: AnyRef): Function = { val identifier = v1.asInstanceOf[FunctionIdentifier] - Function(identifier.database, identifier.funcName) + Function(None, identifier.database, identifier.funcName) } } @@ -53,6 +77,6 @@ class FunctionIdentifierFunctionExtractor extends FunctionExtractor { class ExpressionInfoFunctionExtractor extends FunctionExtractor { override def apply(v1: AnyRef): Function = { val info = v1.asInstanceOf[ExpressionInfo] - Function(Option(info.getDb), info.getName) + Function(None, Option(info.getDb), info.getName) } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala index 4c5e9dc8452..c134b501815 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala @@ -19,8 +19,11 @@ package org.apache.kyuubi.plugin.spark.authz.serde import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.FunctionIdentifier +import org.apache.spark.sql.catalyst.catalog.SessionCatalog +import 
org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionFromQualifiedName import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType.{FunctionType, PERMANENT, SYSTEM, TEMP} +import org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor.getFunctionType object FunctionType extends Enumeration { type FunctionType = Value @@ -33,6 +36,19 @@ object FunctionTypeExtractor { val functionTypeExtractors: Map[String, FunctionTypeExtractor] = { loadExtractorsToMap[FunctionTypeExtractor] } + + def getFunctionType(fi: FunctionIdentifier, catalog: SessionCatalog): FunctionType = { + fi match { + case temp if catalog.isTemporaryFunction(temp) => + TEMP + case permanent if catalog.isPersistentFunction(permanent) => + PERMANENT + case system if catalog.isRegisteredFunction(system) => + SYSTEM + case _ => + TEMP + } + } } /** @@ -53,9 +69,9 @@ class TempMarkerFunctionTypeExtractor extends FunctionTypeExtractor { */ class ExpressionInfoFunctionTypeExtractor extends FunctionTypeExtractor { override def apply(v1: AnyRef, spark: SparkSession): FunctionType = { - val function = new ExpressionInfoFunctionExtractor().apply(v1) + val function = lookupExtractor[ExpressionInfoFunctionExtractor].apply(v1) val fi = FunctionIdentifier(function.functionName, function.database) - new FunctionIdentifierFunctionTypeExtractor().apply(fi, spark) + lookupExtractor[FunctionIdentifierFunctionTypeExtractor].apply(fi, spark) } } @@ -66,14 +82,18 @@ class FunctionIdentifierFunctionTypeExtractor extends FunctionTypeExtractor { override def apply(v1: AnyRef, spark: SparkSession): FunctionType = { val catalog = spark.sessionState.catalog val fi = v1.asInstanceOf[FunctionIdentifier] - if (catalog.isTemporaryFunction(fi)) { - TEMP - } else if (catalog.isPersistentFunction(fi)) { - PERMANENT - } else if (catalog.isRegisteredFunction(fi)) { - SYSTEM - } else { - TEMP - } + getFunctionType(fi, catalog) + } +} + +/** + * String + */ +class FunctionNameFunctionTypeExtractor extends FunctionTypeExtractor { + override def apply(v1: AnyRef, spark: SparkSession): FunctionType = { + val catalog: SessionCatalog = spark.sessionState.catalog + val qualifiedName: String = v1.asInstanceOf[String] + val function = buildFunctionFromQualifiedName(qualifiedName) + getFunctionType(FunctionIdentifier(function.functionName, function.database), catalog) } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala index a52a558a00a..6863516b698 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala @@ -17,9 +17,6 @@ package org.apache.kyuubi.plugin.spark.authz -import java.util.ServiceLoader - -import scala.collection.JavaConverters._ import scala.reflect.ClassTag import com.fasterxml.jackson.core.`type`.TypeReference @@ -28,16 +25,23 @@ import com.fasterxml.jackson.module.scala.DefaultScalaModule import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.kyuubi.plugin.spark.authz.OperationType.{OperationType, QUERY} +import org.apache.kyuubi.plugin.spark.authz.serde.ActionTypeExtractor.actionTypeExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.CatalogExtractor.catalogExtractors +import 
org.apache.kyuubi.plugin.spark.authz.serde.ColumnExtractor.columnExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.DatabaseExtractor.dbExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.functionExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor.functionTypeExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.QueryExtractor.queryExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.TableExtractor.tableExtractors +import org.apache.kyuubi.plugin.spark.authz.serde.TableTypeExtractor.tableTypeExtractors +import org.apache.kyuubi.util.reflect.ReflectUtils._ package object serde { final val mapper = JsonMapper.builder().addModule(DefaultScalaModule).build() - def loadExtractorsToMap[T <: Extractor](implicit ct: ClassTag[T]): Map[String, T] = { - ServiceLoader.load(ct.runtimeClass).iterator().asScala - .map { case e: Extractor => (e.key, e.asInstanceOf[T]) } - .toMap - } + def loadExtractorsToMap[T <: Extractor](implicit ct: ClassTag[T]): Map[String, T] = + loadFromServiceLoader[T]()(ct).map { e: T => (e.key, e) }.toMap final lazy val DB_COMMAND_SPECS: Map[String, DatabaseCommandSpec] = { val is = getClass.getClassLoader.getResourceAsStream("database_command_spec.json") @@ -68,7 +72,8 @@ package object serde { final private lazy val SCAN_SPECS: Map[String, ScanSpec] = { val is = getClass.getClassLoader.getResourceAsStream("scan_command_spec.json") mapper.readValue(is, new TypeReference[Array[ScanSpec]] {}) - .map(e => (e.classname, e)).toMap + .map(e => (e.classname, e)) + .filter(t => t._2.scanDescs.nonEmpty).toMap } def isKnownScan(r: AnyRef): Boolean = { @@ -79,6 +84,21 @@ package object serde { SCAN_SPECS(r.getClass.getName) } + final private lazy val FUNCTION_SPECS: Map[String, ScanSpec] = { + val is = getClass.getClassLoader.getResourceAsStream("scan_command_spec.json") + mapper.readValue(is, new TypeReference[Array[ScanSpec]] {}) + .map(e => (e.classname, e)) + .filter(t => t._2.functionDescs.nonEmpty).toMap + } + + def isKnownFunction(r: AnyRef): Boolean = { + FUNCTION_SPECS.contains(r.getClass.getName) + } + + def getFunctionSpec(r: AnyRef): ScanSpec = { + FUNCTION_SPECS(r.getClass.getName) + } + def operationType(plan: LogicalPlan): OperationType = { val classname = plan.getClass.getName TABLE_COMMAND_SPECS.get(classname) @@ -87,4 +107,33 @@ package object serde { .map(s => s.operationType) .getOrElse(QUERY) } + + /** + * get extractor instance by extractor class name + * @param extractorKey explicitly load extractor by its simple class name. + * null by default means get extractor by extractor class. 
+ * @param ct class tag of extractor class type + * @tparam T extractor class type + * @return + */ + def lookupExtractor[T <: Extractor](extractorKey: String)( + implicit ct: ClassTag[T]): T = { + val extractorClass = ct.runtimeClass + val extractors: Map[String, Extractor] = extractorClass match { + case c if classOf[CatalogExtractor].isAssignableFrom(c) => catalogExtractors + case c if classOf[DatabaseExtractor].isAssignableFrom(c) => dbExtractors + case c if classOf[TableExtractor].isAssignableFrom(c) => tableExtractors + case c if classOf[TableTypeExtractor].isAssignableFrom(c) => tableTypeExtractors + case c if classOf[ColumnExtractor].isAssignableFrom(c) => columnExtractors + case c if classOf[QueryExtractor].isAssignableFrom(c) => queryExtractors + case c if classOf[FunctionExtractor].isAssignableFrom(c) => functionExtractors + case c if classOf[FunctionTypeExtractor].isAssignableFrom(c) => functionTypeExtractors + case c if classOf[ActionTypeExtractor].isAssignableFrom(c) => actionTypeExtractors + case _ => throw new IllegalArgumentException(s"Unknown extractor type: $ct") + } + extractors(extractorKey).asInstanceOf[T] + } + + def lookupExtractor[T <: Extractor](implicit ct: ClassTag[T]): T = + lookupExtractor[T](ct.runtimeClass.getSimpleName)(ct) } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/tableExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/tableExtractors.scala index c848381d426..94641d6d060 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/tableExtractors.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/tableExtractors.scala @@ -24,9 +24,11 @@ import scala.collection.JavaConverters._ import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.CatalogTable +import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ /** * A trait for extracting database and table as string tuple @@ -46,10 +48,25 @@ object TableExtractor { */ def getOwner(v: AnyRef): Option[String] = { // org.apache.spark.sql.connector.catalog.Table - val table = invoke(v, "table") + val table = invokeAs[AnyRef](v, "table") val properties = invokeAs[JMap[String, String]](table, "properties").asScala properties.get("owner") } + + def getOwner(spark: SparkSession, catalogName: String, tableIdent: AnyRef): Option[String] = { + try { + val catalogManager = invokeAs[AnyRef](spark.sessionState, "catalogManager") + val catalog = invokeAs[AnyRef](catalogManager, "catalog", (classOf[String], catalogName)) + val table = invokeAs[AnyRef]( + catalog, + "loadTable", + (Class.forName("org.apache.spark.sql.connector.catalog.Identifier"), tableIdent)) + getOwner(table) + } catch { + // Exception may occur due to invalid reflection or table not found + case _: Exception => None + } + } } /** @@ -87,7 +104,7 @@ class CatalogTableTableExtractor extends TableExtractor { class CatalogTableOptionTableExtractor extends TableExtractor { override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { val catalogTable = v1.asInstanceOf[Option[CatalogTable]] - catalogTable.flatMap(new CatalogTableTableExtractor().apply(spark, _)) + 
catalogTable.flatMap(lookupExtractor[CatalogTableTableExtractor].apply(spark, _)) } } @@ -96,10 +113,10 @@ class CatalogTableOptionTableExtractor extends TableExtractor { */ class ResolvedTableTableExtractor extends TableExtractor { override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { - val catalogVal = invoke(v1, "catalog") - val catalog = new CatalogPluginCatalogExtractor().apply(catalogVal) - val identifier = invoke(v1, "identifier") - val maybeTable = new IdentifierTableExtractor().apply(spark, identifier) + val catalogVal = invokeAs[AnyRef](v1, "catalog") + val catalog = lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogVal) + val identifier = invokeAs[AnyRef](v1, "identifier") + val maybeTable = lookupExtractor[IdentifierTableExtractor].apply(spark, identifier) val maybeOwner = TableExtractor.getOwner(v1) maybeTable.map(_.copy(catalog = catalog, owner = maybeOwner)) } @@ -116,6 +133,34 @@ class IdentifierTableExtractor extends TableExtractor { } } +/** + * java.lang.String + * with concat parts by "." + */ +class StringTableExtractor extends TableExtractor { + override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { + val tableNameArr = v1.asInstanceOf[String].split("\\.") + val maybeTable = tableNameArr.length match { + case 1 => Table(None, None, tableNameArr(0), None) + case 2 => Table(None, Some(tableNameArr(0)), tableNameArr(1), None) + case 3 => Table(Some(tableNameArr(0)), Some(tableNameArr(1)), tableNameArr(2), None) + } + Option(maybeTable) + } +} + +/** + * Seq[org.apache.spark.sql.catalyst.expressions.Expression] + */ +class ExpressionSeqTableExtractor extends TableExtractor { + override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { + val expressions = v1.asInstanceOf[Seq[Expression]] + // Iceberg will rearrange the parameters according to the parameter order + // defined in the procedure, where the table parameters are currently always the first. 
+ lookupExtractor[StringTableExtractor].apply(spark, expressions.head.toString()) + } +} + /** * org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation */ @@ -128,13 +173,12 @@ class DataSourceV2RelationTableExtractor extends TableExtractor { case Some(v2Relation) => val maybeCatalogPlugin = invokeAs[Option[AnyRef]](v2Relation, "catalog") val maybeCatalog = maybeCatalogPlugin.flatMap(catalogPlugin => - new CatalogPluginCatalogExtractor().apply(catalogPlugin)) - val maybeIdentifier = invokeAs[Option[AnyRef]](v2Relation, "identifier") - maybeIdentifier.flatMap { id => - val maybeTable = new IdentifierTableExtractor().apply(spark, id) - val maybeOwner = TableExtractor.getOwner(v2Relation) - maybeTable.map(_.copy(catalog = maybeCatalog, owner = maybeOwner)) - } + lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogPlugin)) + lookupExtractor[TableTableExtractor].apply(spark, invokeAs[AnyRef](v2Relation, "table")) + .map { table => + val maybeOwner = TableExtractor.getOwner(v2Relation) + table.copy(catalog = maybeCatalog, owner = maybeOwner) + } } } } @@ -146,7 +190,7 @@ class LogicalRelationTableExtractor extends TableExtractor { override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { val maybeCatalogTable = invokeAs[Option[AnyRef]](v1, "catalogTable") maybeCatalogTable.flatMap { ct => - new CatalogTableTableExtractor().apply(spark, ct) + lookupExtractor[CatalogTableTableExtractor].apply(spark, ct) } } } @@ -156,11 +200,39 @@ class LogicalRelationTableExtractor extends TableExtractor { */ class ResolvedDbObjectNameTableExtractor extends TableExtractor { override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { - val catalogVal = invoke(v1, "catalog") - val catalog = new CatalogPluginCatalogExtractor().apply(catalogVal) + val catalogVal = invokeAs[AnyRef](v1, "catalog") + val catalog = lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogVal) val nameParts = invokeAs[Seq[String]](v1, "nameParts") val namespace = nameParts.init.toArray val table = nameParts.last Some(Table(catalog, Some(quote(namespace)), table, None)) } } + +/** + * org.apache.spark.sql.catalyst.analysis.ResolvedIdentifier + */ +class ResolvedIdentifierTableExtractor extends TableExtractor { + override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { + v1.getClass.getName match { + case "org.apache.spark.sql.catalyst.analysis.ResolvedIdentifier" => + val catalogVal = invokeAs[AnyRef](v1, "catalog") + val catalog = lookupExtractor[CatalogPluginCatalogExtractor].apply(catalogVal) + val identifier = invokeAs[AnyRef](v1, "identifier") + val maybeTable = lookupExtractor[IdentifierTableExtractor].apply(spark, identifier) + val owner = catalog.flatMap(name => TableExtractor.getOwner(spark, name, identifier)) + maybeTable.map(_.copy(catalog = catalog, owner = owner)) + case _ => None + } + } +} + +/** + * org.apache.spark.sql.connector.catalog.Table + */ +class TableTableExtractor extends TableExtractor { + override def apply(spark: SparkSession, v1: AnyRef): Option[Table] = { + val tableName = invokeAs[String](v1, "name") + lookupExtractor[StringTableExtractor].apply(spark, tableName) + } +} diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/AuthZUtils.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/AuthZUtils.scala index 5773e1c9340..4f7cbb9ef14 100644 --- 
a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/AuthZUtils.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/AuthZUtils.scala @@ -23,8 +23,6 @@ import java.security.interfaces.ECPublicKey import java.security.spec.X509EncodedKeySpec import java.util.Base64 -import scala.util.{Failure, Success, Try} - import org.apache.commons.lang3.StringUtils import org.apache.hadoop.security.UserGroupInformation import org.apache.ranger.plugin.service.RangerBasePlugin @@ -33,67 +31,12 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, View} import org.apache.kyuubi.plugin.spark.authz.AccessControlException import org.apache.kyuubi.plugin.spark.authz.util.ReservedKeys._ +import org.apache.kyuubi.util.SemanticVersion +import org.apache.kyuubi.util.reflect.DynConstructors +import org.apache.kyuubi.util.reflect.ReflectUtils._ private[authz] object AuthZUtils { - /** - * fixme error handling need improve here - */ - def getFieldVal[T](o: Any, name: String): T = { - Try { - val field = o.getClass.getDeclaredField(name) - field.setAccessible(true) - field.get(o) - } match { - case Success(value) => value.asInstanceOf[T] - case Failure(e) => - val candidates = o.getClass.getDeclaredFields.map(_.getName).mkString("[", ",", "]") - throw new RuntimeException(s"$name not in ${o.getClass} $candidates", e) - } - } - - def getFieldValOpt[T](o: Any, name: String): Option[T] = Try(getFieldVal[T](o, name)).toOption - - def invoke( - obj: AnyRef, - methodName: String, - args: (Class[_], AnyRef)*): AnyRef = { - try { - val (types, values) = args.unzip - val method = obj.getClass.getMethod(methodName, types: _*) - method.setAccessible(true) - method.invoke(obj, values: _*) - } catch { - case e: NoSuchMethodException => - val candidates = obj.getClass.getMethods.map(_.getName).mkString("[", ",", "]") - throw new RuntimeException(s"$methodName not in ${obj.getClass} $candidates", e) - } - } - - def invokeAs[T]( - obj: AnyRef, - methodName: String, - args: (Class[_], AnyRef)*): T = { - invoke(obj, methodName, args: _*).asInstanceOf[T] - } - - def invokeStatic( - obj: Class[_], - methodName: String, - args: (Class[_], AnyRef)*): AnyRef = { - val (types, values) = args.unzip - val method = obj.getMethod(methodName, types: _*) - method.setAccessible(true) - method.invoke(obj, values: _*) - } - - def invokeStaticAs[T]( - obj: Class[_], - methodName: String, - args: (Class[_], AnyRef)*): T = { - invokeStatic(obj, methodName, args: _*).asInstanceOf[T] - } - /** * Get the active session user * @param spark spark context instance @@ -118,8 +61,8 @@ private[authz] object AuthZUtils { def hasResolvedPermanentView(plan: LogicalPlan): Boolean = { plan match { - case view: View if view.resolved && isSparkVersionAtLeast("3.1.0") => - !getFieldVal[Boolean](view, "isTempView") + case view: View if view.resolved && isSparkV31OrGreater => + !getField[Boolean](view, "isTempView") case _ => false } @@ -127,7 +70,12 @@ private[authz] object AuthZUtils { lazy val isRanger21orGreater: Boolean = { try { - classOf[RangerBasePlugin].getConstructor(classOf[String], classOf[String], classOf[String]) + DynConstructors.builder().impl( + classOf[RangerBasePlugin], + classOf[String], + classOf[String], + classOf[String]) + .buildChecked[RangerBasePlugin]() true } catch { case _: NoSuchMethodException => @@ -135,30 +83,10 @@ private[authz] object AuthZUtils { } } - def isSparkVersionAtMost(targetVersionString: String): Boolean = { - 
SemanticVersion(SPARK_VERSION).isVersionAtMost(targetVersionString) - } - - def isSparkVersionAtLeast(targetVersionString: String): Boolean = { - SemanticVersion(SPARK_VERSION).isVersionAtLeast(targetVersionString) - } - - def isSparkVersionEqualTo(targetVersionString: String): Boolean = { - SemanticVersion(SPARK_VERSION).isVersionEqualTo(targetVersionString) - } - - /** - * check if spark version satisfied - * first param is option of supported most spark version, - * and secont param is option of supported least spark version - * - * @return - */ - def passSparkVersionCheck: (Option[String], Option[String]) => Boolean = - (mostSparkVersion, leastSparkVersion) => { - mostSparkVersion.forall(isSparkVersionAtMost) && - leastSparkVersion.forall(isSparkVersionAtLeast) - } + lazy val SPARK_RUNTIME_VERSION: SemanticVersion = SemanticVersion(SPARK_VERSION) + lazy val isSparkV31OrGreater: Boolean = SPARK_RUNTIME_VERSION >= "3.1" + lazy val isSparkV32OrGreater: Boolean = SPARK_RUNTIME_VERSION >= "3.2" + lazy val isSparkV33OrGreater: Boolean = SPARK_RUNTIME_VERSION >= "3.3" def quoteIfNeeded(part: String): String = { if (part.matches("[a-zA-Z0-9_]+") && !part.matches("\\d+")) { diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/ObjectFilterPlaceHolder.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/ObjectFilterPlaceHolder.scala index a5d1c0d3b54..0d3c39adb69 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/ObjectFilterPlaceHolder.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/ObjectFilterPlaceHolder.scala @@ -18,9 +18,19 @@ package org.apache.kyuubi.plugin.spark.authz.util import org.apache.spark.sql.catalyst.expressions.Attribute -import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics} +import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UnaryNode} + +case class ObjectFilterPlaceHolder(child: LogicalPlan) extends UnaryNode + with WithInternalChild { -case class ObjectFilterPlaceHolder(child: LogicalPlan) extends LeafNode { override def output: Seq[Attribute] = child.output - override def computeStats(): Statistics = child.stats + + override def withNewChildInternal(newChild: LogicalPlan): LogicalPlan = { + // `FilterDataSourceV2Strategy` requires child.nodename not changed + if (child.nodeName == newChild.nodeName) { + copy(newChild) + } else { + this + } + } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/RangerConfigProvider.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/RangerConfigProvider.scala index 83fe048e677..a61d94a8fc8 100644 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/RangerConfigProvider.scala +++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/RangerConfigProvider.scala @@ -20,6 +20,7 @@ package org.apache.kyuubi.plugin.spark.authz.util import org.apache.hadoop.conf.Configuration import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ trait RangerConfigProvider { @@ -33,15 +34,13 @@ trait RangerConfigProvider { * org.apache.ranger.authorization.hadoop.config.RangerConfiguration * for Ranger 2.0 and below */ - def getRangerConf: Configuration = { + val 
getRangerConf: Configuration = { if (isRanger21orGreater) { // for Ranger 2.1+ - invokeAs[Configuration](this, "getConfig") + invokeAs(this, "getConfig") } else { // for Ranger 2.0 and below - invokeStaticAs[Configuration]( - Class.forName("org.apache.ranger.authorization.hadoop.config.RangerConfiguration"), - "getInstance") + invokeAs("org.apache.ranger.authorization.hadoop.config.RangerConfiguration", "getInstance") } } } diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/SemanticVersion.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/SemanticVersion.scala deleted file mode 100644 index 4d7e8972505..00000000000 --- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/util/SemanticVersion.scala +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.plugin.spark.authz.util - -/** - * Encapsulate a component Spark version for the convenience of version checks. 
- * Copy from org.apache.kyuubi.engine.ComponentVersion - */ -case class SemanticVersion(majorVersion: Int, minorVersion: Int) { - - def isVersionAtMost(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - (runtimeMajor < targetMajor) || { - runtimeMajor == targetMajor && runtimeMinor <= targetMinor - }) - } - - def isVersionAtLeast(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - (runtimeMajor > targetMajor) || { - runtimeMajor == targetMajor && runtimeMinor >= targetMinor - }) - } - - def isVersionEqualTo(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - runtimeMajor == targetMajor && runtimeMinor == targetMinor) - } - - def compareVersion( - targetVersionString: String, - callback: (Int, Int, Int, Int) => Boolean): Boolean = { - val targetVersion = SemanticVersion(targetVersionString) - val targetMajor = targetVersion.majorVersion - val targetMinor = targetVersion.minorVersion - callback(targetMajor, targetMinor, this.majorVersion, this.minorVersion) - } - - override def toString: String = s"$majorVersion.$minorVersion" -} - -object SemanticVersion { - - def apply(versionString: String): SemanticVersion = { - """^(\d+)\.(\d+)(\..*)?$""".r.findFirstMatchIn(versionString) match { - case Some(m) => - SemanticVersion(m.group(1).toInt, m.group(2).toInt) - case None => - throw new IllegalArgumentException(s"Tried to parse '$versionString' as a project" + - s" version string, but it could not find the major and minor version numbers.") - } - } -} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala new file mode 100644 index 00000000000..7faddd0c7fa --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala @@ -0,0 +1,348 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.plugin.spark.authz.gen + +import java.nio.charset.StandardCharsets +import java.nio.file.{Files, Paths, StandardOpenOption} +import java.util.UUID + +import com.fasterxml.jackson.annotation.JsonInclude.Include +import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper} +import com.fasterxml.jackson.databind.json.JsonMapper +import com.fasterxml.jackson.databind.node.ObjectNode +import com.fasterxml.jackson.module.scala.DefaultScalaModule +import org.apache.ranger.plugin.model.RangerPolicy +import org.scalatest.funsuite.AnyFunSuite + +// scalastyle:off +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ +import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess.allowTypes +import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyResource._ +import org.apache.kyuubi.plugin.spark.authz.gen.RangerAccessType._ +import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._ +import org.apache.kyuubi.util.AssertionUtils._ + +/** + * Generates the policy file to test/main/resources dir. + * + * To run the test suite: + * {{{ + * KYUUBI_UPDATE=0 dev/gen/gen_ranger_policy_json.sh + * }}} + * + * To regenerate the ranger policy file: + * {{{ + * dev/gen/gen_ranger_policy_json.sh + * }}} + */ +class PolicyJsonFileGenerator extends AnyFunSuite { + // scalastyle:on + final private val mapper: ObjectMapper = JsonMapper.builder() + .addModule(DefaultScalaModule) + .serializationInclusion(Include.NON_NULL) + .build() + + test("check ranger policy file") { + val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath + .split("target").head + val policyFileName = "sparkSql_hive_jenkins.json" + val policyFilePath = + Paths.get(pluginHome, "src", "test", "resources", policyFileName) + val generatedStr = mapper.writerWithDefaultPrettyPrinter() + .writeValueAsString(servicePolicies) + + if (sys.env.get("KYUUBI_UPDATE").contains("1")) { + // scalastyle:off println + println(s"Writing ranger policies to $policyFileName.") + // scalastyle:on println + Files.write( + policyFilePath, + generatedStr.getBytes(StandardCharsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING) + } else { + assertFileContent( + policyFilePath, + Seq(generatedStr), + "dev/gen/gen_ranger_policy_json.sh", + splitFirstExpectedLine = true) + } + } + + private def servicePolicies: JsonNode = { + val inputStream = Thread.currentThread().getContextClassLoader + .getResourceAsStream("policies_base.json") + val rootObjNode = mapper.readTree(inputStream).asInstanceOf[ObjectNode] + val policies = genPolicies + // scalastyle:off println + println(s"Generated ${policies.size} policies.") + // scalastyle:on println + rootObjNode.set("policies", mapper.readTree(mapper.writeValueAsString(policies))) + } + + private def genPolicies: Iterable[RangerPolicy] = { + List[RangerPolicy]( + // access for all + policyAccessForAllUrl, + policyAccessForAllDbTableColumns, + policyAccessForAllDbUdf, + // access + policyAccessForDbAllColumns, + policyAccessForDefaultDbSrcTable, + policyAccessForDefaultBobUse, + policyAccessForDefaultBobSelect, + policyAccessForPermViewAccessOnly, + // row filter + policyFilterForSrcTableKeyLessThan20, + policyFilterForPermViewKeyLessThan20, + // data masking + policyMaskForPermView, + policyMaskForPermViewUser, + policyMaskNullifyForValue2, + policyMaskShowFirst4ForValue3, + policyMaskDateShowYearForValue4, + policyMaskShowFirst4ForValue5) + // 
fill the id and guid with auto-increased index + .zipWithIndex + .map { + case (p, index) => + p.setId(index) + p.setGuid(UUID.nameUUIDFromBytes(index.toString.getBytes()).toString) + p + } + } + + // resources + private val allDatabaseRes = databaseRes("*") + private val allTableRes = tableRes("*") + private val allColumnRes = columnRes("*") + private val srcTableRes = tableRes("src") + + // policy type + private val POLICY_TYPE_ACCESS: Int = 0 + private val POLICY_TYPE_DATAMASK: Int = 1 + private val POLICY_TYPE_ROWFILTER: Int = 2 + + // policies + private val policyAccessForAllUrl = KRangerPolicy( + name = "all - url", + description = "Policy for all - url", + resources = Map("url" -> KRangerPolicyResource( + values = List("*"), + isRecursive = true)), + policyItems = List(KRangerPolicyItem( + users = List(admin), + accesses = allowTypes(select, update, create, drop, alter, index, lock, all, read, write), + delegateAdmin = true))) + + private val policyAccessForAllDbTableColumns = KRangerPolicy( + name = "all - database, table, column", + description = "Policy for all - database, table, column", + resources = Map(allDatabaseRes, allTableRes, allColumnRes), + policyItems = List(KRangerPolicyItem( + users = List(admin), + accesses = allowTypes(select, update, create, drop, alter, index, lock, all, read, write), + delegateAdmin = true))) + + private val policyAccessForAllDbUdf = KRangerPolicy( + name = "all - database, udf", + description = "Policy for all - database, udf", + resources = Map(allDatabaseRes, "udf" -> KRangerPolicyResource(values = List("*"))), + policyItems = List(KRangerPolicyItem( + users = List(admin), + accesses = allowTypes(select, update, create, drop, alter, index, lock, all, read, write), + delegateAdmin = true))) + + private val policyAccessForDbAllColumns = KRangerPolicy( + name = "all - database, udf", + description = "Policy for all - database, udf", + resources = Map( + databaseRes(defaultDb, sparkCatalog, icebergNamespace, namespace1), + allTableRes, + allColumnRes), + policyItems = List( + KRangerPolicyItem( + users = List(bob, permViewUser, ownerPlaceHolder), + accesses = allowTypes(select, update, create, drop, alter, index, lock, all, read, write), + delegateAdmin = true), + KRangerPolicyItem( + users = List(defaultTableOwner, createOnlyUser), + accesses = allowTypes(create), + delegateAdmin = true))) + + private val policyAccessForDefaultDbSrcTable = KRangerPolicy( + name = "default_kent", + resources = Map( + databaseRes(defaultDb, sparkCatalog), + srcTableRes, + columnRes("key")), + policyItems = List( + KRangerPolicyItem( + users = List(kent), + accesses = allowTypes(select, update, create, drop, alter, index, lock, all, read, write), + delegateAdmin = true), + KRangerPolicyItem( + users = List(defaultTableOwner, createOnlyUser), + accesses = allowTypes(create), + delegateAdmin = true))) + + private val policyFilterForSrcTableKeyLessThan20 = KRangerPolicy( + name = "src_key_less_than_20", + policyType = POLICY_TYPE_ROWFILTER, + resources = Map( + databaseRes(defaultDb), + srcTableRes), + rowFilterPolicyItems = List( + KRangerRowFilterPolicyItem( + rowFilterInfo = KRangerPolicyItemRowFilterInfo(filterExpr = "key<20"), + accesses = allowTypes(select), + users = List(bob, permViewUser)))) + + private val policyFilterForPermViewKeyLessThan20 = KRangerPolicy( + name = "perm_view_key_less_than_20", + policyType = POLICY_TYPE_ROWFILTER, + resources = Map( + databaseRes(defaultDb), + tableRes("perm_view")), + rowFilterPolicyItems = List( + 
KRangerRowFilterPolicyItem( + rowFilterInfo = KRangerPolicyItemRowFilterInfo(filterExpr = "key<20"), + accesses = allowTypes(select), + users = List(permViewUser)))) + + private val policyAccessForDefaultBobUse = KRangerPolicy( + name = "default_bob_use", + resources = Map( + databaseRes("default_bob", sparkCatalog), + tableRes("table_use*"), + allColumnRes), + policyItems = List( + KRangerPolicyItem( + users = List(bob), + accesses = allowTypes(update), + delegateAdmin = true))) + + private val policyAccessForDefaultBobSelect = KRangerPolicy( + name = "default_bob_select", + resources = Map( + databaseRes("default_bob", sparkCatalog), + tableRes("table_select*"), + allColumnRes), + policyItems = List( + KRangerPolicyItem( + users = List(bob), + accesses = allowTypes(select, use), + delegateAdmin = true))) + + private val policyMaskForPermView = KRangerPolicy( + name = "src_value_hash_perm_view", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog), + srcTableRes, + columnRes("value1")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_HASH"), + users = List(bob), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val policyMaskForPermViewUser = KRangerPolicy( + name = "src_value_hash", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog), + tableRes("perm_view"), + columnRes("value1")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_HASH"), + users = List(permViewUser), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val policyMaskNullifyForValue2 = KRangerPolicy( + name = "src_value2_nullify", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog, icebergNamespace, namespace1), + srcTableRes, + columnRes("value2")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK"), + users = List(bob), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val policyMaskShowFirst4ForValue3 = KRangerPolicy( + name = "src_value3_sf4", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog), + srcTableRes, + columnRes("value3")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_SHOW_FIRST_4"), + users = List(bob), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val policyMaskDateShowYearForValue4 = KRangerPolicy( + name = "src_value4_sf4", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog), + srcTableRes, + columnRes("value4")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_DATE_SHOW_YEAR"), + users = List(bob), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val policyMaskShowFirst4ForValue5 = KRangerPolicy( + name = "src_value5_sf4", + policyType = POLICY_TYPE_DATAMASK, + resources = Map( + databaseRes(defaultDb, sparkCatalog), + srcTableRes, + columnRes("value5")), + dataMaskPolicyItems = List( + KRangerDataMaskPolicyItem( + dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_SHOW_LAST_4"), + users = List(bob), + accesses = allowTypes(select), + delegateAdmin = true))) + + private val 
policyAccessForPermViewAccessOnly = KRangerPolicy( + name = "someone_access_perm_view", + resources = Map( + databaseRes(defaultDb), + tableRes("perm_view"), + allColumnRes), + policyItems = List( + KRangerPolicyItem( + users = List(permViewOnlyUser), + accesses = allowTypes(select), + delegateAdmin = true))) +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala new file mode 100644 index 00000000000..71bce375972 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.plugin.spark.authz.gen + +import scala.collection.convert.ImplicitConversions._ +import scala.language.implicitConversions + +import org.apache.ranger.plugin.model.RangerPolicy +import org.apache.ranger.plugin.model.RangerPolicy._ + +import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._ + +trait RangerObjectGenerator[T] { + def get: T +} + +object RangerClassConversions { + implicit def getRangerObject[T](g: RangerObjectGenerator[T]): T = g.get +} + +case class KRangerPolicy( + service: String = "hive_jenkins", + name: String, + policyType: Int = 0, + description: String = "", + isAuditEnabled: Boolean = true, + resources: Map[String, RangerPolicyResource] = Map.empty, + conditions: List[RangerPolicyItemCondition] = List.empty, + policyItems: List[RangerPolicyItem] = List.empty, + denyPolicyItems: List[RangerPolicyItem] = List.empty, + allowExceptions: List[RangerPolicyItem] = List.empty, + denyExceptions: List[RangerPolicyItem] = List.empty, + dataMaskPolicyItems: List[RangerDataMaskPolicyItem] = List.empty, + rowFilterPolicyItems: List[RangerRowFilterPolicyItem] = List.empty, + id: Int = 0, + guid: String = "", + isEnabled: Boolean = true, + version: Int = 1) extends RangerObjectGenerator[RangerPolicy] { + override def get: RangerPolicy = { + val p = new RangerPolicy() + p.setService(service) + p.setName(name) + p.setPolicyType(policyType) + p.setDescription(description) + p.setIsAuditEnabled(isAuditEnabled) + p.setResources(resources) + p.setConditions(conditions) + p.setPolicyItems(policyItems) + p.setAllowExceptions(allowExceptions) + p.setDenyExceptions(denyExceptions) + p.setDataMaskPolicyItems(dataMaskPolicyItems) + p.setRowFilterPolicyItems(rowFilterPolicyItems) + p.setId(id) + p.setGuid(guid) + p.setIsAuditEnabled(isEnabled) + p.setVersion(version) + p + } +} + +case class KRangerPolicyResource( + values: List[String] = List.empty, + isExcludes: Boolean = false, + isRecursive: 
Boolean = false) extends RangerObjectGenerator[RangerPolicyResource] { + override def get: RangerPolicyResource = { + val r = new RangerPolicyResource() + r.setValues(values) + r.setIsExcludes(isExcludes) + r.setIsRecursive(isRecursive) + r + } +} + +object KRangerPolicyResource { + def databaseRes(values: String*): (String, RangerPolicyResource) = + "database" -> KRangerPolicyResource(values.toList) + + def tableRes(values: String*): (String, RangerPolicyResource) = + "table" -> KRangerPolicyResource(values.toList) + + def columnRes(values: String*): (String, RangerPolicyResource) = + "column" -> KRangerPolicyResource(values.toList) +} + +case class KRangerPolicyItemCondition( + `type`: String, + values: List[String]) extends RangerObjectGenerator[RangerPolicyItemCondition] { + override def get: RangerPolicyItemCondition = { + val c = new RangerPolicyItemCondition() + c.setType(`type`) + c.setValues(values) + c + } +} + +case class KRangerPolicyItem( + accesses: List[RangerPolicyItemAccess] = List.empty, + users: List[String] = List.empty, + groups: List[String] = List.empty, + conditions: List[RangerPolicyItemCondition] = List.empty, + delegateAdmin: Boolean = false) extends RangerObjectGenerator[RangerPolicyItem] { + override def get: RangerPolicyItem = { + val i = new RangerPolicyItem() + i.setAccesses(accesses) + i.setUsers(users) + i.setGroups(groups) + i.setConditions(conditions) + i.setDelegateAdmin(delegateAdmin) + i + } +} + +case class KRangerPolicyItemAccess( + `type`: String, + isAllowed: Boolean) extends RangerObjectGenerator[RangerPolicyItemAccess] { + override def get: RangerPolicyItemAccess = { + val a = new RangerPolicyItemAccess + a.setType(`type`) + a.setIsAllowed(isAllowed) + a + } +} + +object KRangerPolicyItemAccess { + def allowTypes(types: String*): List[RangerPolicyItemAccess] = + types.map(t => KRangerPolicyItemAccess(t, isAllowed = true).get).toList +} + +case class KRangerDataMaskPolicyItem( + dataMaskInfo: RangerPolicyItemDataMaskInfo, + accesses: List[RangerPolicyItemAccess] = List.empty, + users: List[String] = List.empty, + groups: List[String] = List.empty, + conditions: List[RangerPolicyItemCondition] = List.empty, + delegateAdmin: Boolean = false) extends RangerObjectGenerator[RangerDataMaskPolicyItem] { + override def get: RangerDataMaskPolicyItem = { + val i = new RangerDataMaskPolicyItem + i.setDataMaskInfo(dataMaskInfo) + i.setAccesses(accesses) + i.setUsers(users) + i.setGroups(groups) + i.setConditions(conditions) + i.setDelegateAdmin(delegateAdmin) + i + } +} + +case class KRangerPolicyItemDataMaskInfo( + dataMaskType: String) extends RangerObjectGenerator[RangerPolicyItemDataMaskInfo] { + override def get: RangerPolicyItemDataMaskInfo = { + val i = new RangerPolicyItemDataMaskInfo + i.setDataMaskType(dataMaskType) + i + } +} + +case class KRangerRowFilterPolicyItem( + rowFilterInfo: RangerPolicyItemRowFilterInfo, + accesses: List[RangerPolicyItemAccess] = List.empty, + users: List[String] = List.empty, + groups: List[String] = List.empty, + conditions: List[RangerPolicyItemCondition] = List.empty, + delegateAdmin: Boolean = false) extends RangerObjectGenerator[RangerRowFilterPolicyItem] { + override def get: RangerRowFilterPolicyItem = { + val i = new RangerRowFilterPolicyItem + i.setRowFilterInfo(rowFilterInfo) + i.setAccesses(accesses) + i.setUsers(users) + i.setGroups(groups) + i.setConditions(conditions) + i.setDelegateAdmin(delegateAdmin) + i + } +} + +case class KRangerPolicyItemRowFilterInfo( + filterExpr: String) extends 
RangerObjectGenerator[RangerPolicyItemRowFilterInfo] { + override def get: RangerPolicyItemRowFilterInfo = { + val i = new RangerPolicyItemRowFilterInfo + i.setFilterExpr(filterExpr) + i + } +} + +object RangerAccessType { + val select = "select" + val update = "update" + val create = "create" + val drop = "drop" + val alter = "alter" + val index = "index" + val lock = "lock" + val all = "all" + val read = "read" + val write = "write" + val use = "use" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/resources/policies_base.json b/extensions/spark/kyuubi-spark-authz/src/test/resources/policies_base.json new file mode 100644 index 00000000000..aea5d2a9c28 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/resources/policies_base.json @@ -0,0 +1,1678 @@ +{ + "serviceName": "hive_jenkins", + "serviceId": 1, + "policyVersion": 85, + "policyUpdateTime": "20190429-21:36:09.000-+0800", + "policies": [ + { + "service": "hive_jenkins", + "name": "all - url", + "policyType": 0, + "policyPriority": 0, + "description": "Policy for all - url", + "isAuditEnabled": true, + "resources": { + "url": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": true + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "update", + "isAllowed": true + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": true + }, + { + "type": "alter", + "isAllowed": true + }, + { + "type": "index", + "isAllowed": true + }, + { + "type": "lock", + "isAllowed": true + }, + { + "type": "all", + "isAllowed": true + }, + { + "type": "read", + "isAllowed": true + }, + { + "type": "write", + "isAllowed": true + } + ], + "users": [ + "admin" + ], + "groups": [], + "conditions": [], + "delegateAdmin": true + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [], + "id": 1, + "guid": "cf7e6725-492f-434f-bffe-6bb4e3147246", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "all - database, table, column", + "policyType": 0, + "policyPriority": 0, + "description": "Policy for all - database, table, column", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "update", + "isAllowed": true + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": true + }, + { + "type": "alter", + "isAllowed": true + }, + { + "type": "index", + "isAllowed": true + }, + { + "type": "lock", + "isAllowed": true + }, + { + "type": "all", + "isAllowed": true + }, + { + "type": "read", + "isAllowed": true + }, + { + "type": "write", + "isAllowed": true + } + ], + "users": [ + "admin" + ], + "groups": [], + "conditions": [], + "delegateAdmin": true + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [], + "id": 2, + "guid": "3b96138a-af4d-48bc-9544-58c5bfa1979b", + "isEnabled": true, + "version": 1 + }, + { + 
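RangerGenWrapper above pairs lightweight K* builder case classes with the RangerClassConversions.getRangerObject implicit, so Ranger model objects can be declared as plain Scala values and converted on assignment. A minimal sketch of building one policy with that DSL, mirroring how PolicyJsonFileGenerator assembles its policies (the object name, policy name, database, and user below are made-up illustration values; only the wrapper API comes from this change):

{{{
package org.apache.kyuubi.plugin.spark.authz.gen

import org.apache.ranger.plugin.model.RangerPolicy

import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess.allowTypes
import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyResource._
import org.apache.kyuubi.plugin.spark.authz.gen.RangerAccessType._
import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._

object DemoPolicySketch {
  // "demo_select_only", "demo_db" and "alice" are illustrative values only.
  val demoPolicy: RangerPolicy = KRangerPolicy(
    name = "demo_select_only",
    resources = Map(
      databaseRes("demo_db"),
      tableRes("*"),
      columnRes("*")),
    policyItems = List(KRangerPolicyItem(
      users = List("alice"),
      accesses = allowTypes(select),
      delegateAdmin = true)))
  // The assignment above triggers RangerClassConversions.getRangerObject, which
  // calls KRangerPolicy.get and copies each field onto a new RangerPolicy instance.
}
}}}

The case-class defaults let each policy declaration state only the fields it cares about, instead of the long chains of Java setters the Ranger model otherwise requires.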
"service": "hive_jenkins", + "name": "all - database, udf", + "policyType": 0, + "policyPriority": 0, + "description": "Policy for all - database, udf", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "udf": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "update", + "isAllowed": true + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": true + }, + { + "type": "alter", + "isAllowed": true + }, + { + "type": "index", + "isAllowed": true + }, + { + "type": "lock", + "isAllowed": true + }, + { + "type": "all", + "isAllowed": true + }, + { + "type": "read", + "isAllowed": true + }, + { + "type": "write", + "isAllowed": true + } + ], + "users": [ + "admin" + ], + "groups": [], + "conditions": [], + "delegateAdmin": true + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [], + "id": 3, + "guid": "db08fbb0-61da-4f33-8144-ccd89816151d", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "default", + "policyType": 0, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog", + "iceberg_ns", + "ns1" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "update", + "isAllowed": true + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": true + }, + { + "type": "alter", + "isAllowed": true + }, + { + "type": "index", + "isAllowed": true + }, + { + "type": "lock", + "isAllowed": true + }, + { + "type": "all", + "isAllowed": true + }, + { + "type": "read", + "isAllowed": true + }, + { + "type": "write", + "isAllowed": true + } + ], + "users": [ + "bob", + "perm_view_user", + "{OWNER}" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + }, + { + "accesses": [ + { + "type": "select", + "isAllowed": false + }, + { + "type": "update", + "isAllowed": false + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": false + }, + { + "type": "alter", + "isAllowed": false + }, + { + "type": "index", + "isAllowed": false + }, + { + "type": "lock", + "isAllowed": false + }, + { + "type": "all", + "isAllowed": false + }, + { + "type": "read", + "isAllowed": false + }, + { + "type": "write", + "isAllowed": false + } + ], + "users": [ + "default_table_owner", + "create_only_user" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 5, + "guid": "2db6099d-e4f1-41df-9d24-f2f47bed618e", + "isEnabled": true, + "version": 5 + }, + { + "service": "hive_jenkins", + "name": "default_kent", + "policyType": 0, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + 
"database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "key" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "update", + "isAllowed": true + }, + { + "type": "create", + "isAllowed": true + }, + { + "type": "drop", + "isAllowed": true + }, + { + "type": "alter", + "isAllowed": true + }, + { + "type": "index", + "isAllowed": true + }, + { + "type": "lock", + "isAllowed": true + }, + { + "type": "all", + "isAllowed": true + }, + { + "type": "read", + "isAllowed": true + }, + { + "type": "write", + "isAllowed": true + } + ], + "users": [ + "kent" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 5, + "guid": "fd24db19-f7cc-4e13-a8ba-bbd5a07a2d8d", + "isEnabled": true, + "version": 5 + }, + { + "service": "hive_jenkins", + "name": "src_key _less_than_20", + "policyType": 2, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [ + { + "rowFilterInfo": { + "filterExpr": "key\u003c20" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "serviceType": "hive", + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 4, + "guid": "f588a9ed-f7b1-48f7-9d0d-c12cf2b9b7ed", + "isEnabled": true, + "version": 26 + }, + { + "service": "hive_jenkins", + "name": "src_key_less_than_20_perm_view", + "policyType": 2, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "perm_view" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [ + { + "rowFilterInfo": { + "filterExpr": "key\u003c20" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "perm_view_user" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "serviceType": "hive", + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 22, + "guid": "c240a7ea-9d26-4db2-b925-d5dbe49bd447 \n", + "isEnabled": true, + "version": 26 + }, + { + "service": "hive_jenkins", + "name": "default_bob_use", + "policyType": 0, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default_bob", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": 
[ + "table_use*" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "update", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 5, + "guid": "2eb6099d-e4f1-41df-9d24-f2f47bed618e", + "isEnabled": true, + "version": 5 + }, + { + "service": "hive_jenkins", + "name": "default_bob_select", + "policyType": 0, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default_bob", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "table_select*" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + }, + { + "type": "use", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 5, + "guid": "2fb6099d-e4f1-41df-9d24-f2f47bed618e", + "isEnabled": true, + "version": 5 + }, + { + "service": "hive_jenkins", + "name": "src_value_hash_perm_view", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "value1" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK_HASH" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 5, + "guid": "ed1868a1-bf79-4721-a3d5-6815cc7d4986", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "src_value_hash", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "value1" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "perm_view" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK_HASH" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "perm_view_user" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + 
"policyLabels": [ + "" + ], + "id": 20, + "guid": "bfeddeab-50d0-4902-985f-42559efa39c3", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "src_value2_nullify", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog", + "iceberg_ns", + "ns1" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "value2" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 6, + "guid": "98a04cd7-8d14-4466-adc9-126d87a3af69", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "src_value3_sf4", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "value3" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK_SHOW_FIRST_4" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 7, + "guid": "9d50a525-b24c-4cf5-a885-d10d426368d1", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "src_value4_sf4", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "value4" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK_DATE_SHOW_YEAR" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 8, + "guid": "9d50a526-b24c-4cf5-a885-d10d426368d1", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "src_value5_show_last_4", + "policyType": 1, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default", + "spark_catalog" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": 
{ + "values": [ + "value5" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "src" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [ + { + "dataMaskInfo": { + "dataMaskType": "MASK_SHOW_LAST_4" + }, + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "bob" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 32, + "guid": "b3f1f1e0-2bd6-4b20-8a32-a531006ae151", + "isEnabled": true, + "version": 1 + }, + { + "service": "hive_jenkins", + "name": "someone_access_perm_view", + "policyType": 0, + "policyPriority": 0, + "description": "", + "isAuditEnabled": true, + "resources": { + "database": { + "values": [ + "default" + ], + "isExcludes": false, + "isRecursive": false + }, + "column": { + "values": [ + "*" + ], + "isExcludes": false, + "isRecursive": false + }, + "table": { + "values": [ + "perm_view" + ], + "isExcludes": false, + "isRecursive": false + } + }, + "policyItems": [ + { + "accesses": [ + { + "type": "select", + "isAllowed": true + } + ], + "users": [ + "user_perm_view_only" + ], + "groups": [], + "conditions": [], + "delegateAdmin": false + } + ], + "denyPolicyItems": [], + "allowExceptions": [], + "denyExceptions": [], + "dataMaskPolicyItems": [], + "rowFilterPolicyItems": [], + "options": {}, + "validitySchedules": [], + "policyLabels": [ + "" + ], + "id": 123, + "guid": "2fb6099d-e421-41df-9d24-f2f47bed618e", + "isEnabled": true, + "version": 5 + } + ], + "serviceDef": { + "name": "hive", + "implClass": "org.apache.ranger.services.hive.RangerServiceHive", + "label": "Hive Server2", + "description": "Hive Server2", + "options": { + "enableDenyAndExceptionsInPolicies": "true" + }, + "configs": [ + { + "itemId": 1, + "name": "username", + "type": "string", + "mandatory": true, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Username" + }, + { + "itemId": 2, + "name": "password", + "type": "password", + "mandatory": true, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Password" + }, + { + "itemId": 3, + "name": "jdbc.driverClassName", + "type": "string", + "mandatory": true, + "defaultValue": "org.apache.hive.jdbc.HiveDriver", + "validationRegEx": "", + "validationMessage": "", + "uiHint": "" + }, + { + "itemId": 4, + "name": "jdbc.url", + "type": "string", + "mandatory": true, + "defaultValue": "", + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{\"TextFieldWithIcon\":true, \"info\": \"1.For Remote Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;\u003cbr\u003e2.For Embedded Mode (no host or port), eg.\u003cbr\u003ejdbc:hive2:///;initFile\u003d\u0026lt;file\u0026gt;\u003cbr\u003e3.For HTTP Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;\u003cbr\u003etransportMode\u003dhttp;httpPath\u003d\u0026lt;httpPath\u0026gt;\u003cbr\u003e4.For SSL Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;ssl\u003dtrue;\u003cbr\u003esslTrustStore\u003dtStore;trustStorePassword\u003dpw\u003cbr\u003e5.For ZooKeeper Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;/;serviceDiscoveryMode\u003d\u003cbr\u003ezooKeeper;zooKeeperNamespace\u003dhiveserver2\u003cbr\u003e6.For Kerberos Mode, 
eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;\u003cbr\u003eprincipal\u003dhive/domain@EXAMPLE.COM\u003cbr\u003e\"}" + }, + { + "itemId": 5, + "name": "commonNameForCertificate", + "type": "string", + "mandatory": false, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Common Name for Certificate" + } + ], + "resources": [ + { + "itemId": 1, + "name": "database", + "type": "string", + "level": 10, + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "true", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Hive Database", + "description": "Hive Database", + "accessTypeRestrictions": [], + "isValidLeaf": false + }, + { + "itemId": 5, + "name": "url", + "type": "string", + "level": 10, + "mandatory": true, + "lookupSupported": false, + "recursiveSupported": true, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerPathResourceMatcher", + "matcherOptions": { + "wildCard": "true", + "ignoreCase": "false" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "URL", + "description": "URL", + "accessTypeRestrictions": [], + "isValidLeaf": true + }, + { + "itemId": 2, + "name": "table", + "type": "string", + "level": 20, + "parent": "database", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "true", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Hive Table", + "description": "Hive Table", + "accessTypeRestrictions": [], + "isValidLeaf": false + }, + { + "itemId": 3, + "name": "udf", + "type": "string", + "level": 20, + "parent": "database", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "true", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Hive UDF", + "description": "Hive UDF", + "accessTypeRestrictions": [], + "isValidLeaf": true + }, + { + "itemId": 4, + "name": "column", + "type": "string", + "level": 30, + "parent": "table", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "true", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Hive Column", + "description": "Hive Column", + "accessTypeRestrictions": [], + "isValidLeaf": true + } + ], + "accessTypes": [ + { + "itemId": 1, + "name": "select", + "label": "select", + "impliedGrants": [] + }, + { + "itemId": 2, + "name": "update", + "label": "update", + "impliedGrants": [] + }, + { + "itemId": 3, + "name": "create", + "label": "Create", + "impliedGrants": [] + }, + { + "itemId": 4, + "name": "drop", + "label": "Drop", + "impliedGrants": [] + }, + { + "itemId": 5, + "name": "alter", + "label": "Alter", + "impliedGrants": [] + }, + { + "itemId": 6, + "name": "index", + 
"label": "Index", + "impliedGrants": [] + }, + { + "itemId": 7, + "name": "lock", + "label": "Lock", + "impliedGrants": [] + }, + { + "itemId": 8, + "name": "all", + "label": "All", + "impliedGrants": [ + "select", + "update", + "create", + "drop", + "alter", + "index", + "lock", + "read", + "write" + ] + }, + { + "itemId": 9, + "name": "read", + "label": "Read", + "impliedGrants": [] + }, + { + "itemId": 10, + "name": "write", + "label": "Write", + "impliedGrants": [] + } + ], + "policyConditions": [], + "contextEnrichers": [], + "enums": [], + "dataMaskDef": { + "maskTypes": [ + { + "itemId": 1, + "name": "MASK", + "label": "Redact", + "description": "Replace lowercase with \u0027x\u0027, uppercase with \u0027X\u0027, digits with \u00270\u0027", + "transformer": "mask({col})", + "dataMaskOptions": {} + }, + { + "itemId": 2, + "name": "MASK_SHOW_LAST_4", + "label": "Partial mask: show last 4", + "description": "Show last 4 characters; replace rest with \u0027x\u0027", + "transformer": "mask_show_last_n({col}, 4, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027)", + "dataMaskOptions": {} + }, + { + "itemId": 3, + "name": "MASK_SHOW_FIRST_4", + "label": "Partial mask: show first 4", + "description": "Show first 4 characters; replace rest with \u0027x\u0027", + "transformer": "mask_show_first_n({col}, 4, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027)", + "dataMaskOptions": {} + }, + { + "itemId": 4, + "name": "MASK_HASH", + "label": "Hash", + "description": "Hash the value", + "transformer": "mask_hash({col})", + "dataMaskOptions": {} + }, + { + "itemId": 5, + "name": "MASK_NULL", + "label": "Nullify", + "description": "Replace with NULL", + "dataMaskOptions": {} + }, + { + "itemId": 6, + "name": "MASK_NONE", + "label": "Unmasked (retain original value)", + "description": "No masking", + "dataMaskOptions": {} + }, + { + "itemId": 12, + "name": "MASK_DATE_SHOW_YEAR", + "label": "Date: show only year", + "description": "Date: show only year", + "transformer": "mask({col}, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027, 1, 0, -1)", + "dataMaskOptions": {} + }, + { + "itemId": 13, + "name": "CUSTOM", + "label": "Custom", + "description": "Custom", + "dataMaskOptions": {} + } + ], + "accessTypes": [ + { + "itemId": 1, + "name": "select", + "label": "select", + "impliedGrants": [] + } + ], + "resources": [ + { + "itemId": 1, + "name": "database", + "type": "string", + "level": 10, + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "false", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{ \"singleValue\":true }", + "label": "Hive Database", + "description": "Hive Database", + "accessTypeRestrictions": [], + "isValidLeaf": false + }, + { + "itemId": 2, + "name": "table", + "type": "string", + "level": 20, + "parent": "database", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "false", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{ \"singleValue\":true }", + "label": "Hive Table", + "description": "Hive Table", + "accessTypeRestrictions": [], + "isValidLeaf": false + }, + { + "itemId": 4, + "name": 
"column", + "type": "string", + "level": 30, + "parent": "table", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "false", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{ \"singleValue\":true }", + "label": "Hive Column", + "description": "Hive Column", + "accessTypeRestrictions": [], + "isValidLeaf": true + } + ] + }, + "rowFilterDef": { + "accessTypes": [ + { + "itemId": 1, + "name": "select", + "label": "select", + "impliedGrants": [] + } + ], + "resources": [ + { + "itemId": 1, + "name": "database", + "type": "string", + "level": 10, + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "false", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{ \"singleValue\":true }", + "label": "Hive Database", + "description": "Hive Database", + "accessTypeRestrictions": [], + "isValidLeaf": false + }, + { + "itemId": 2, + "name": "table", + "type": "string", + "level": 20, + "parent": "database", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": false, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": "false", + "ignoreCase": "true" + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "{ \"singleValue\":true }", + "label": "Hive Table", + "description": "Hive Table", + "accessTypeRestrictions": [], + "isValidLeaf": true + } + ] + }, + "id": 3, + "guid": "3e1afb5a-184a-4e82-9d9c-87a5cacc243c", + "isEnabled": true, + "createTime": "20190401-20:14:36.000-+0800", + "updateTime": "20190401-20:14:36.000-+0800", + "version": 1 + }, + "auditMode": "audit-default" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json b/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json index 84b0e30eb56..6c160d3216a 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json +++ b/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json @@ -1,1614 +1,1353 @@ { - "serviceName": "hive_jenkins", - "serviceId": 1, - "policyVersion": 85, - "policyUpdateTime": "20190429-21:36:09.000-+0800", - "policies": [ - { - "service": "hive_jenkins", - "name": "all - url", - "policyType": 0, - "policyPriority": 0, - "description": "Policy for all - url", - "isAuditEnabled": true, - "resources": { - "url": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": true - } - }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "update", - "isAllowed": true - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": true - }, - { - "type": "alter", - "isAllowed": true - }, - { - "type": "index", - "isAllowed": true - }, - { - "type": "lock", - "isAllowed": true - }, - { - "type": "all", - "isAllowed": true - }, - { - "type": "read", - "isAllowed": true - }, - { - "type": "write", - "isAllowed": true - } - ], - "users": [ - "admin" - ], - "groups": [], - "conditions": [], - "delegateAdmin": true - } - ], - "denyPolicyItems": [], - 
"allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [], - "id": 1, - "guid": "cf7e6725-492f-434f-bffe-6bb4e3147246", - "isEnabled": true, - "version": 1 + "serviceName" : "hive_jenkins", + "serviceId" : 1, + "policyVersion" : 85, + "policyUpdateTime" : "20190429-21:36:09.000-+0800", + "policies" : [ { + "id" : 0, + "guid" : "cfcd2084-95d5-35ef-a6e7-dff9f98764da", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "all - url", + "policyType" : 0, + "policyPriority" : 0, + "description" : "Policy for all - url", + "isAuditEnabled" : true, + "resources" : { + "url" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : true + } }, - { - "service": "hive_jenkins", - "name": "all - database, table, column", - "policyType": 0, - "policyPriority": 0, - "description": "Policy for all - database, table, column", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "update", + "isAllowed" : true + }, { + "type" : "create", + "isAllowed" : true + }, { + "type" : "drop", + "isAllowed" : true + }, { + "type" : "alter", + "isAllowed" : true + }, { + "type" : "index", + "isAllowed" : true + }, { + "type" : "lock", + "isAllowed" : true + }, { + "type" : "all", + "isAllowed" : true + }, { + "type" : "read", + "isAllowed" : true + }, { + "type" : "write", + "isAllowed" : true + } ], + "users" : [ "admin" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 1, + "guid" : "c4ca4238-a0b9-3382-8dcc-509a6f75849b", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "all - database, table, column", + "policyType" : 0, + "policyPriority" : 0, + "description" : "Policy for all - database, table, column", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "update", - "isAllowed": true - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": true - }, - { - "type": "alter", - "isAllowed": true - }, - { - "type": "index", - "isAllowed": true - }, - { - "type": "lock", - "isAllowed": true - }, - { - "type": "all", - "isAllowed": true - }, - { - "type": "read", - "isAllowed": true - }, - { - "type": "write", - "isAllowed": true - } - ], - "users": [ - "admin" - ], - "groups": [], - "conditions": [], - "delegateAdmin": true - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [], - "id": 2, - "guid": "3b96138a-af4d-48bc-9544-58c5bfa1979b", - "isEnabled": true, - "version": 1 - }, - { - "service": 
"hive_jenkins", - "name": "all - database, udf", - "policyType": 0, - "policyPriority": 0, - "description": "Policy for all - database, udf", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "udf": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - } + "column" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "update", - "isAllowed": true - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": true - }, - { - "type": "alter", - "isAllowed": true - }, - { - "type": "index", - "isAllowed": true - }, - { - "type": "lock", - "isAllowed": true - }, - { - "type": "all", - "isAllowed": true - }, - { - "type": "read", - "isAllowed": true - }, - { - "type": "write", - "isAllowed": true - } - ], - "users": [ - "admin" - ], - "groups": [], - "conditions": [], - "delegateAdmin": true - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [], - "id": 3, - "guid": "db08fbb0-61da-4f33-8144-ccd89816151d", - "isEnabled": true, - "version": 1 + "table" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "default", - "policyType": 0, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog", - "iceberg_ns", - "ns1" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "update", + "isAllowed" : true + }, { + "type" : "create", + "isAllowed" : true + }, { + "type" : "drop", + "isAllowed" : true + }, { + "type" : "alter", + "isAllowed" : true + }, { + "type" : "index", + "isAllowed" : true + }, { + "type" : "lock", + "isAllowed" : true + }, { + "type" : "all", + "isAllowed" : true + }, { + "type" : "read", + "isAllowed" : true + }, { + "type" : "write", + "isAllowed" : true + } ], + "users" : [ "admin" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 2, + "guid" : "c81e728d-9d4c-3f63-af06-7f89cc14862c", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "all - database, udf", + "policyType" : 0, + "policyPriority" : 0, + "description" : "Policy for all - database, udf", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "update", - "isAllowed": true - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": true - }, - { - "type": "alter", - "isAllowed": true - }, - { - "type": "index", - "isAllowed": 
true - }, - { - "type": "lock", - "isAllowed": true - }, - { - "type": "all", - "isAllowed": true - }, - { - "type": "read", - "isAllowed": true - }, - { - "type": "write", - "isAllowed": true - } - ], - "users": [ - "bob", - "perm_view_user", - "{OWNER}" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - }, { - "accesses": [ - { - "type": "select", - "isAllowed": false - }, - { - "type": "update", - "isAllowed": false - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": false - }, - { - "type": "alter", - "isAllowed": false - }, - { - "type": "index", - "isAllowed": false - }, - { - "type": "lock", - "isAllowed": false - }, - { - "type": "all", - "isAllowed": false - }, - { - "type": "read", - "isAllowed": false - }, - { - "type": "write", - "isAllowed": false - } - ], - "users": [ - "default_table_owner", - "create_only_user" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 5, - "guid": "2db6099d-e4f1-41df-9d24-f2f47bed618e", - "isEnabled": true, - "version": 5 + "udf" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "default_kent", - "policyType": 0, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "key" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "update", + "isAllowed" : true + }, { + "type" : "create", + "isAllowed" : true + }, { + "type" : "drop", + "isAllowed" : true + }, { + "type" : "alter", + "isAllowed" : true + }, { + "type" : "index", + "isAllowed" : true + }, { + "type" : "lock", + "isAllowed" : true + }, { + "type" : "all", + "isAllowed" : true + }, { + "type" : "read", + "isAllowed" : true + }, { + "type" : "write", + "isAllowed" : true + } ], + "users" : [ "admin" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 3, + "guid" : "eccbc87e-4b5c-32fe-a830-8fd9f2a7baf3", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "all - database, udf", + "policyType" : 0, + "policyPriority" : 0, + "description" : "Policy for all - database, udf", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog", "iceberg_ns", "ns1" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "update", - "isAllowed": true - }, - { - "type": "create", - "isAllowed": true - }, - { - "type": "drop", - "isAllowed": true - }, - { - "type": "alter", - "isAllowed": true - }, - { - "type": "index", - "isAllowed": true - }, - { - "type": "lock", - "isAllowed": true - }, - { - 
"type": "all", - "isAllowed": true - }, - { - "type": "read", - "isAllowed": true - }, - { - "type": "write", - "isAllowed": true - } - ], - "users": [ - "kent" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 5, - "guid": "fd24db19-f7cc-4e13-a8ba-bbd5a07a2d8d", - "isEnabled": true, - "version": 5 + "column" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false + }, + "table" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "src_key _less_than_20", - "policyType": 2, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "update", + "isAllowed" : true + }, { + "type" : "create", + "isAllowed" : true + }, { + "type" : "drop", + "isAllowed" : true + }, { + "type" : "alter", + "isAllowed" : true + }, { + "type" : "index", + "isAllowed" : true + }, { + "type" : "lock", + "isAllowed" : true + }, { + "type" : "all", + "isAllowed" : true + }, { + "type" : "read", + "isAllowed" : true + }, { + "type" : "write", + "isAllowed" : true + } ], + "users" : [ "bob", "perm_view_user", "{OWNER}" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + }, { + "accesses" : [ { + "type" : "create", + "isAllowed" : true + } ], + "users" : [ "default_table_owner", "create_only_user" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 4, + "guid" : "a87ff679-a2f3-371d-9181-a67b7542122c", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "default_kent", + "policyType" : 0, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [ - { - "rowFilterInfo": { - "filterExpr": "key\u003c20" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "serviceType": "hive", - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 4, - "guid": "f588a9ed-f7b1-48f7-9d0d-c12cf2b9b7ed", - "isEnabled": true, - "version": 26 - },{ - "service": "hive_jenkins", - "name": "src_key_less_than_20_perm_view", - "policyType": 2, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "perm_view" - ], - "isExcludes": false, - "isRecursive": 
false - } + "column" : { + "values" : [ "key" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [ - { - "rowFilterInfo": { - "filterExpr": "key\u003c20" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "perm_view_user" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "serviceType": "hive", - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 22, - "guid": "c240a7ea-9d26-4db2-b925-d5dbe49bd447 \n", - "isEnabled": true, - "version": 26 + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "default_bob_use", - "policyType": 0, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default_bob", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "table_use*" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "update", + "isAllowed" : true + }, { + "type" : "create", + "isAllowed" : true + }, { + "type" : "drop", + "isAllowed" : true + }, { + "type" : "alter", + "isAllowed" : true + }, { + "type" : "index", + "isAllowed" : true + }, { + "type" : "lock", + "isAllowed" : true + }, { + "type" : "all", + "isAllowed" : true + }, { + "type" : "read", + "isAllowed" : true + }, { + "type" : "write", + "isAllowed" : true + } ], + "users" : [ "kent" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + }, { + "accesses" : [ { + "type" : "create", + "isAllowed" : true + } ], + "users" : [ "default_table_owner", "create_only_user" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 5, + "guid" : "e4da3b7f-bbce-3345-9777-2b0674a318d5", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "default_bob_use", + "policyType" : 0, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default_bob", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "update", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 5, - "guid": "2eb6099d-e4f1-41df-9d24-f2f47bed618e", - "isEnabled": true, - "version": 5 - }, - { - "service": "hive_jenkins", - "name": "default_bob_select", - "policyType": 0, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default_bob", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, 
- "column": { - "values": [ - "*" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "table_select*" - ], - "isExcludes": false, - "isRecursive": false - } + "column" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [ - { - "accesses": [ - { - "type": "select", - "isAllowed": true - }, - { - "type": "use", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 5, - "guid": "2fb6099d-e4f1-41df-9d24-f2f47bed618e", - "isEnabled": true, - "version": 5 + "table" : { + "values" : [ "table_use*" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "src_value_hash_perm_view", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value1" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "update", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 6, + "guid" : "1679091c-5a88-3faf-afb5-e6087eb1b2dc", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "default_bob_select", + "policyType" : 0, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default_bob", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK_HASH" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 5, - "guid": "ed1868a1-bf79-4721-a3d5-6815cc7d4986", - "isEnabled": true, - "version": 1 - },{ - "service": "hive_jenkins", - "name": "src_value_hash", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value1" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "perm_view" - ], - "isExcludes": false, - "isRecursive": false - } + "column" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK_HASH" - }, - 
"accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "perm_view_user" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 20, - "guid": "bfeddeab-50d0-4902-985f-42559efa39c3", - "isEnabled": true, - "version": 1 + "table" : { + "values" : [ "table_select*" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "src_value2_nullify", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog", - "iceberg_ns", - "ns1" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value2" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + }, { + "type" : "use", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 7, + "guid" : "8f14e45f-ceea-367a-9a36-dedd4bea2543", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "someone_access_perm_view", + "policyType" : 0, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 6, - "guid": "98a04cd7-8d14-4466-adc9-126d87a3af69", - "isEnabled": true, - "version": 1 - }, - { - "service": "hive_jenkins", - "name": "src_value3_sf4", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value3" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "column" : { + "values" : [ "*" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK_SHOW_FIRST_4" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 7, - "guid": "9d50a525-b24c-4cf5-a885-d10d426368d1", - "isEnabled": true, - "version": 1 + "table" : { + 
"values" : [ "perm_view" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "src_value4_sf4", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value4" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "user_perm_view_only" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true + } ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 8, + "guid" : "c9f0f895-fb98-3b91-99f5-1fd0297e236d", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_key_less_than_20", + "policyType" : 2, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - "denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK_DATE_SHOW_YEAR" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 8, - "guid": "9d50a526-b24c-4cf5-a885-d10d426368d1", - "isEnabled": true, - "version": 1 + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } }, - { - "service": "hive_jenkins", - "name": "src_value5_show_last_4", - "policyType": 1, - "policyPriority": 0, - "description": "", - "isAuditEnabled": true, - "resources": { - "database": { - "values": [ - "default", - "spark_catalog" - ], - "isExcludes": false, - "isRecursive": false - }, - "column": { - "values": [ - "value5" - ], - "isExcludes": false, - "isRecursive": false - }, - "table": { - "values": [ - "src" - ], - "isExcludes": false, - "isRecursive": false - } + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob", "perm_view_user" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : false, + "rowFilterInfo" : { + "filterExpr" : "key<20" + } + } ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 9, + "guid" : "45c48cce-2e2d-3fbd-aa1a-fc51c7c6ad26", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "perm_view_key_less_than_20", + "policyType" : 2, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default" ], + "isExcludes" : false, + "isRecursive" : false }, - "policyItems": [], - "denyPolicyItems": [], - "allowExceptions": [], - 
"denyExceptions": [], - "dataMaskPolicyItems": [ - { - "dataMaskInfo": { - "dataMaskType": "MASK_SHOW_LAST_4" - }, - "accesses": [ - { - "type": "select", - "isAllowed": true - } - ], - "users": [ - "bob" - ], - "groups": [], - "conditions": [], - "delegateAdmin": false - } - ], - "rowFilterPolicyItems": [], - "options": {}, - "validitySchedules": [], - "policyLabels": [ - "" - ], - "id": 32, - "guid": "b3f1f1e0-2bd6-4b20-8a32-a531006ae151", - "isEnabled": true, - "version": 1 - } - ], - "serviceDef": { - "name": "hive", - "implClass": "org.apache.ranger.services.hive.RangerServiceHive", - "label": "Hive Server2", - "description": "Hive Server2", - "options": { - "enableDenyAndExceptionsInPolicies": "true" + "table" : { + "values" : [ "perm_view" ], + "isExcludes" : false, + "isRecursive" : false + } }, - "configs": [ - { - "itemId": 1, - "name": "username", - "type": "string", - "mandatory": true, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Username" + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ ], + "rowFilterPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "perm_view_user" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : false, + "rowFilterInfo" : { + "filterExpr" : "key<20" + } + } ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 10, + "guid" : "d3d94468-02a4-3259-b55d-38e6d163e820", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_value_hash_perm_view", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 2, - "name": "password", - "type": "password", - "mandatory": true, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Password" + "column" : { + "values" : [ "value1" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 3, - "name": "jdbc.driverClassName", - "type": "string", - "mandatory": true, - "defaultValue": "org.apache.hive.jdbc.HiveDriver", - "validationRegEx": "", - "validationMessage": "", - "uiHint": "" + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK_HASH" + } + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 11, + "guid" : "6512bd43-d9ca-36e0-ac99-0b0a82652dca", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_value_hash", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 4, - "name": "jdbc.url", - "type": "string", - "mandatory": true, - "defaultValue": "", - "validationRegEx": "", 
- "validationMessage": "", - "uiHint": "{\"TextFieldWithIcon\":true, \"info\": \"1.For Remote Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;\u003cbr\u003e2.For Embedded Mode (no host or port), eg.\u003cbr\u003ejdbc:hive2:///;initFile\u003d\u0026lt;file\u0026gt;\u003cbr\u003e3.For HTTP Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;\u003cbr\u003etransportMode\u003dhttp;httpPath\u003d\u0026lt;httpPath\u0026gt;\u003cbr\u003e4.For SSL Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;ssl\u003dtrue;\u003cbr\u003esslTrustStore\u003dtStore;trustStorePassword\u003dpw\u003cbr\u003e5.For ZooKeeper Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;/;serviceDiscoveryMode\u003d\u003cbr\u003ezooKeeper;zooKeeperNamespace\u003dhiveserver2\u003cbr\u003e6.For Kerberos Mode, eg.\u003cbr\u003ejdbc:hive2://\u0026lt;host\u0026gt;:\u0026lt;port\u0026gt;/;\u003cbr\u003eprincipal\u003dhive/domain@EXAMPLE.COM\u003cbr\u003e\"}" + "column" : { + "values" : [ "value1" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 5, - "name": "commonNameForCertificate", - "type": "string", - "mandatory": false, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Common Name for Certificate" + "table" : { + "values" : [ "perm_view" ], + "isExcludes" : false, + "isRecursive" : false } - ], - "resources": [ - { - "itemId": 1, - "name": "database", - "type": "string", - "level": 10, - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": true, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "true", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Hive Database", - "description": "Hive Database", - "accessTypeRestrictions": [], - "isValidLeaf": false + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "perm_view_user" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK_HASH" + } + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 12, + "guid" : "c20ad4d7-6fe9-3759-aa27-a0c99bff6710", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_value2_nullify", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog", "iceberg_ns", "ns1" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 5, - "name": "url", - "type": "string", - "level": 10, - "mandatory": true, - "lookupSupported": false, - "recursiveSupported": true, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerPathResourceMatcher", - "matcherOptions": { - "wildCard": "true", - "ignoreCase": "false" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "URL", - "description": "URL", - "accessTypeRestrictions": [], - "isValidLeaf": true + "column" : { + "values" : [ "value2" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 2, - "name": 
"table", - "type": "string", - "level": 20, - "parent": "database", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": true, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "true", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Hive Table", - "description": "Hive Table", - "accessTypeRestrictions": [], - "isValidLeaf": false + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK" + } + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 13, + "guid" : "c51ce410-c124-310e-8db5-e4b97fc2af39", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_value3_sf4", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 3, - "name": "udf", - "type": "string", - "level": 20, - "parent": "database", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": true, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "true", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Hive UDF", - "description": "Hive UDF", - "accessTypeRestrictions": [], - "isValidLeaf": true + "column" : { + "values" : [ "value3" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 4, - "name": "column", - "type": "string", - "level": 30, - "parent": "table", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": true, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "true", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "", - "label": "Hive Column", - "description": "Hive Column", - "accessTypeRestrictions": [], - "isValidLeaf": true + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK_SHOW_FIRST_4" } - ], - "accessTypes": [ - { - "itemId": 1, - "name": "select", - "label": "select", - "impliedGrants": [] + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 14, + "guid" : "aab32389-22bc-325a-af60-6eb525ffdc56", + "isEnabled" : true, + "version" : 1, + "service" : 
"hive_jenkins", + "name" : "src_value4_sf4", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 2, - "name": "update", - "label": "update", - "impliedGrants": [] + "column" : { + "values" : [ "value4" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 3, - "name": "create", - "label": "Create", - "impliedGrants": [] + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK_DATE_SHOW_YEAR" + } + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + }, { + "id" : 15, + "guid" : "9bf31c7f-f062-336a-96d3-c8bd1f8f2ff3", + "isEnabled" : true, + "version" : 1, + "service" : "hive_jenkins", + "name" : "src_value5_sf4", + "policyType" : 1, + "policyPriority" : 0, + "description" : "", + "isAuditEnabled" : true, + "resources" : { + "database" : { + "values" : [ "default", "spark_catalog" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 4, - "name": "drop", - "label": "Drop", - "impliedGrants": [] + "column" : { + "values" : [ "value5" ], + "isExcludes" : false, + "isRecursive" : false }, - { - "itemId": 5, - "name": "alter", - "label": "Alter", - "impliedGrants": [] + "table" : { + "values" : [ "src" ], + "isExcludes" : false, + "isRecursive" : false + } + }, + "conditions" : [ ], + "policyItems" : [ ], + "denyPolicyItems" : [ ], + "allowExceptions" : [ ], + "denyExceptions" : [ ], + "dataMaskPolicyItems" : [ { + "accesses" : [ { + "type" : "select", + "isAllowed" : true + } ], + "users" : [ "bob" ], + "groups" : [ ], + "roles" : [ ], + "conditions" : [ ], + "delegateAdmin" : true, + "dataMaskInfo" : { + "dataMaskType" : "MASK_SHOW_LAST_4" + } + } ], + "rowFilterPolicyItems" : [ ], + "options" : { }, + "validitySchedules" : [ ], + "policyLabels" : [ ], + "isDenyAllElse" : false + } ], + "serviceDef" : { + "name" : "hive", + "implClass" : "org.apache.ranger.services.hive.RangerServiceHive", + "label" : "Hive Server2", + "description" : "Hive Server2", + "options" : { + "enableDenyAndExceptionsInPolicies" : "true" + }, + "configs" : [ { + "itemId" : 1, + "name" : "username", + "type" : "string", + "mandatory" : true, + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Username" + }, { + "itemId" : 2, + "name" : "password", + "type" : "password", + "mandatory" : true, + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Password" + }, { + "itemId" : 3, + "name" : "jdbc.driverClassName", + "type" : "string", + "mandatory" : true, + "defaultValue" : "org.apache.hive.jdbc.HiveDriver", + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "" + }, { + "itemId" : 4, + "name" : "jdbc.url", + "type" : "string", + "mandatory" : true, + "defaultValue" : "", + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{\"TextFieldWithIcon\":true, \"info\": \"1.For Remote Mode, eg.
jdbc:hive2://<host>:<port><br>2.For Embedded Mode (no host or port), eg.<br>jdbc:hive2:///;initFile=<file><br>3.For HTTP Mode, eg.<br>jdbc:hive2://<host>:<port>/;<br>transportMode=http;httpPath=<httpPath><br>4.For SSL Mode, eg.<br>jdbc:hive2://<host>:<port>/;ssl=true;<br>sslTrustStore=tStore;trustStorePassword=pw<br>5.For ZooKeeper Mode, eg.<br>jdbc:hive2://<host>/;serviceDiscoveryMode=<br>zooKeeper;zooKeeperNamespace=hiveserver2<br>6.For Kerberos Mode, eg.<br>jdbc:hive2://<host>:<port>/;<br>principal=hive/domain@EXAMPLE.COM<br>
      \"}" + }, { + "itemId" : 5, + "name" : "commonNameForCertificate", + "type" : "string", + "mandatory" : false, + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Common Name for Certificate" + } ], + "resources" : [ { + "itemId" : 1, + "name" : "database", + "type" : "string", + "level" : 10, + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : true, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "true", + "ignoreCase" : "true" }, - { - "itemId": 6, - "name": "index", - "label": "Index", - "impliedGrants": [] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Hive Database", + "description" : "Hive Database", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : false + }, { + "itemId" : 5, + "name" : "url", + "type" : "string", + "level" : 10, + "mandatory" : true, + "lookupSupported" : false, + "recursiveSupported" : true, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerPathResourceMatcher", + "matcherOptions" : { + "wildCard" : "true", + "ignoreCase" : "false" }, - { - "itemId": 7, - "name": "lock", - "label": "Lock", - "impliedGrants": [] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "URL", + "description" : "URL", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : true + }, { + "itemId" : 2, + "name" : "table", + "type" : "string", + "level" : 20, + "parent" : "database", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : true, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "true", + "ignoreCase" : "true" }, - { - "itemId": 8, - "name": "all", - "label": "All", - "impliedGrants": [ - "select", - "update", - "create", - "drop", - "alter", - "index", - "lock", - "read", - "write" - ] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Hive Table", + "description" : "Hive Table", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : false + }, { + "itemId" : 3, + "name" : "udf", + "type" : "string", + "level" : 20, + "parent" : "database", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : true, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "true", + "ignoreCase" : "true" }, - { - "itemId": 9, - "name": "read", - "label": "Read", - "impliedGrants": [] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Hive UDF", + "description" : "Hive UDF", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : true + }, { + "itemId" : 4, + "name" : "column", + "type" : "string", + "level" : 30, + "parent" : "table", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : true, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "true", + "ignoreCase" : "true" }, - { - "itemId": 10, - "name": "write", - "label": "Write", - "impliedGrants": [] - } - ], - "policyConditions": [], - "contextEnrichers": [], - "enums": [], - "dataMaskDef": { - "maskTypes": [ - { - "itemId": 1, - "name": "MASK", - "label": "Redact", - "description": "Replace lowercase with \u0027x\u0027, uppercase with 
\u0027X\u0027, digits with \u00270\u0027", - "transformer": "mask({col})", - "dataMaskOptions": {} - }, - { - "itemId": 2, - "name": "MASK_SHOW_LAST_4", - "label": "Partial mask: show last 4", - "description": "Show last 4 characters; replace rest with \u0027x\u0027", - "transformer": "mask_show_last_n({col}, 4, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027)", - "dataMaskOptions": {} - }, - { - "itemId": 3, - "name": "MASK_SHOW_FIRST_4", - "label": "Partial mask: show first 4", - "description": "Show first 4 characters; replace rest with \u0027x\u0027", - "transformer": "mask_show_first_n({col}, 4, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027)", - "dataMaskOptions": {} - }, - { - "itemId": 4, - "name": "MASK_HASH", - "label": "Hash", - "description": "Hash the value", - "transformer": "mask_hash({col})", - "dataMaskOptions": {} + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "", + "label" : "Hive Column", + "description" : "Hive Column", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : true + } ], + "accessTypes" : [ { + "itemId" : 1, + "name" : "select", + "label" : "select", + "impliedGrants" : [ ] + }, { + "itemId" : 2, + "name" : "update", + "label" : "update", + "impliedGrants" : [ ] + }, { + "itemId" : 3, + "name" : "create", + "label" : "Create", + "impliedGrants" : [ ] + }, { + "itemId" : 4, + "name" : "drop", + "label" : "Drop", + "impliedGrants" : [ ] + }, { + "itemId" : 5, + "name" : "alter", + "label" : "Alter", + "impliedGrants" : [ ] + }, { + "itemId" : 6, + "name" : "index", + "label" : "Index", + "impliedGrants" : [ ] + }, { + "itemId" : 7, + "name" : "lock", + "label" : "Lock", + "impliedGrants" : [ ] + }, { + "itemId" : 8, + "name" : "all", + "label" : "All", + "impliedGrants" : [ "select", "update", "create", "drop", "alter", "index", "lock", "read", "write" ] + }, { + "itemId" : 9, + "name" : "read", + "label" : "Read", + "impliedGrants" : [ ] + }, { + "itemId" : 10, + "name" : "write", + "label" : "Write", + "impliedGrants" : [ ] + } ], + "policyConditions" : [ ], + "contextEnrichers" : [ ], + "enums" : [ ], + "dataMaskDef" : { + "maskTypes" : [ { + "itemId" : 1, + "name" : "MASK", + "label" : "Redact", + "description" : "Replace lowercase with 'x', uppercase with 'X', digits with '0'", + "transformer" : "mask({col})", + "dataMaskOptions" : { } + }, { + "itemId" : 2, + "name" : "MASK_SHOW_LAST_4", + "label" : "Partial mask: show last 4", + "description" : "Show last 4 characters; replace rest with 'x'", + "transformer" : "mask_show_last_n({col}, 4, 'x', 'x', 'x', -1, '1')", + "dataMaskOptions" : { } + }, { + "itemId" : 3, + "name" : "MASK_SHOW_FIRST_4", + "label" : "Partial mask: show first 4", + "description" : "Show first 4 characters; replace rest with 'x'", + "transformer" : "mask_show_first_n({col}, 4, 'x', 'x', 'x', -1, '1')", + "dataMaskOptions" : { } + }, { + "itemId" : 4, + "name" : "MASK_HASH", + "label" : "Hash", + "description" : "Hash the value", + "transformer" : "mask_hash({col})", + "dataMaskOptions" : { } + }, { + "itemId" : 5, + "name" : "MASK_NULL", + "label" : "Nullify", + "description" : "Replace with NULL", + "dataMaskOptions" : { } + }, { + "itemId" : 6, + "name" : "MASK_NONE", + "label" : "Unmasked (retain original value)", + "description" : "No masking", + "dataMaskOptions" : { } + }, { + "itemId" : 12, + "name" : "MASK_DATE_SHOW_YEAR", + "label" : "Date: show only year", + "description" : "Date: show only year", + "transformer" : "mask({col}, 'x', 'x', 'x', -1, '1', 1, 0, -1)", + 
"dataMaskOptions" : { } + }, { + "itemId" : 13, + "name" : "CUSTOM", + "label" : "Custom", + "description" : "Custom", + "dataMaskOptions" : { } + } ], + "accessTypes" : [ { + "itemId" : 1, + "name" : "select", + "label" : "select", + "impliedGrants" : [ ] + } ], + "resources" : [ { + "itemId" : 1, + "name" : "database", + "type" : "string", + "level" : 10, + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "false", + "ignoreCase" : "true" }, - { - "itemId": 5, - "name": "MASK_NULL", - "label": "Nullify", - "description": "Replace with NULL", - "dataMaskOptions": {} + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{ \"singleValue\":true }", + "label" : "Hive Database", + "description" : "Hive Database", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : false + }, { + "itemId" : 2, + "name" : "table", + "type" : "string", + "level" : 20, + "parent" : "database", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "false", + "ignoreCase" : "true" }, - { - "itemId": 6, - "name": "MASK_NONE", - "label": "Unmasked (retain original value)", - "description": "No masking", - "dataMaskOptions": {} + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{ \"singleValue\":true }", + "label" : "Hive Table", + "description" : "Hive Table", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : false + }, { + "itemId" : 4, + "name" : "column", + "type" : "string", + "level" : 30, + "parent" : "table", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "false", + "ignoreCase" : "true" }, - { - "itemId": 12, - "name": "MASK_DATE_SHOW_YEAR", - "label": "Date: show only year", - "description": "Date: show only year", - "transformer": "mask({col}, \u0027x\u0027, \u0027x\u0027, \u0027x\u0027, -1, \u00271\u0027, 1, 0, -1)", - "dataMaskOptions": {} - }, - { - "itemId": 13, - "name": "CUSTOM", - "label": "Custom", - "description": "Custom", - "dataMaskOptions": {} - } - ], - "accessTypes": [ - { - "itemId": 1, - "name": "select", - "label": "select", - "impliedGrants": [] - } - ], - "resources": [ - { - "itemId": 1, - "name": "database", - "type": "string", - "level": 10, - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "false", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "{ \"singleValue\":true }", - "label": "Hive Database", - "description": "Hive Database", - "accessTypeRestrictions": [], - "isValidLeaf": false - }, - { - "itemId": 2, - "name": "table", - "type": "string", - "level": 20, - "parent": "database", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "false", - "ignoreCase": "true" - }, - "validationRegEx": "", - 
"validationMessage": "", - "uiHint": "{ \"singleValue\":true }", - "label": "Hive Table", - "description": "Hive Table", - "accessTypeRestrictions": [], - "isValidLeaf": false - }, - { - "itemId": 4, - "name": "column", - "type": "string", - "level": 30, - "parent": "table", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "false", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "{ \"singleValue\":true }", - "label": "Hive Column", - "description": "Hive Column", - "accessTypeRestrictions": [], - "isValidLeaf": true - } - ] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{ \"singleValue\":true }", + "label" : "Hive Column", + "description" : "Hive Column", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : true + } ] }, - "rowFilterDef": { - "accessTypes": [ - { - "itemId": 1, - "name": "select", - "label": "select", - "impliedGrants": [] - } - ], - "resources": [ - { - "itemId": 1, - "name": "database", - "type": "string", - "level": 10, - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "false", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "{ \"singleValue\":true }", - "label": "Hive Database", - "description": "Hive Database", - "accessTypeRestrictions": [], - "isValidLeaf": false + "rowFilterDef" : { + "accessTypes" : [ { + "itemId" : 1, + "name" : "select", + "label" : "select", + "impliedGrants" : [ ] + } ], + "resources" : [ { + "itemId" : 1, + "name" : "database", + "type" : "string", + "level" : 10, + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "false", + "ignoreCase" : "true" + }, + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{ \"singleValue\":true }", + "label" : "Hive Database", + "description" : "Hive Database", + "accessTypeRestrictions" : [ ], + "isValidLeaf" : false + }, { + "itemId" : 2, + "name" : "table", + "type" : "string", + "level" : 20, + "parent" : "database", + "mandatory" : true, + "lookupSupported" : true, + "recursiveSupported" : false, + "excludesSupported" : false, + "matcher" : "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions" : { + "wildCard" : "false", + "ignoreCase" : "true" }, - { - "itemId": 2, - "name": "table", - "type": "string", - "level": 20, - "parent": "database", - "mandatory": true, - "lookupSupported": true, - "recursiveSupported": false, - "excludesSupported": false, - "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", - "matcherOptions": { - "wildCard": "false", - "ignoreCase": "true" - }, - "validationRegEx": "", - "validationMessage": "", - "uiHint": "{ \"singleValue\":true }", - "label": "Hive Table", - "description": "Hive Table", - "accessTypeRestrictions": [], - "isValidLeaf": true - } - ] + "validationRegEx" : "", + "validationMessage" : "", + "uiHint" : "{ \"singleValue\":true }", + "label" : "Hive Table", + "description" : "Hive Table", + "accessTypeRestrictions" : [ ], + 
"isValidLeaf" : true + } ] }, - "id": 3, - "guid": "3e1afb5a-184a-4e82-9d9c-87a5cacc243c", - "isEnabled": true, - "createTime": "20190401-20:14:36.000-+0800", - "updateTime": "20190401-20:14:36.000-+0800", - "version": 1 + "id" : 3, + "guid" : "3e1afb5a-184a-4e82-9d9c-87a5cacc243c", + "isEnabled" : true, + "createTime" : "20190401-20:14:36.000-+0800", + "updateTime" : "20190401-20:14:36.000-+0800", + "version" : 1 }, - "auditMode": "audit-default" -} + "auditMode" : "audit-default" +} \ No newline at end of file diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala new file mode 100644 index 00000000000..ad4b57faa93 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.plugin.spark.authz + +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} +// scalastyle:off +import org.scalatest.funsuite.AnyFunSuite + +import org.apache.kyuubi.plugin.spark.authz.OperationType.QUERY +import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType + +abstract class FunctionPrivilegesBuilderSuite extends AnyFunSuite + with SparkSessionProvider with BeforeAndAfterAll with BeforeAndAfterEach { + // scalastyle:on + + protected def withTable(t: String)(f: String => Unit): Unit = { + try { + f(t) + } finally { + sql(s"DROP TABLE IF EXISTS $t") + } + } + + protected def withDatabase(t: String)(f: String => Unit): Unit = { + try { + f(t) + } finally { + sql(s"DROP DATABASE IF EXISTS $t") + } + } + + protected def checkColumns(plan: LogicalPlan, cols: Seq[String]): Unit = { + val (in, out, _) = PrivilegesBuilder.build(plan, spark) + assert(out.isEmpty, "Queries shall not check output privileges") + val po = in.head + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assert(po.columns === cols) + } + + protected def checkColumns(query: String, cols: Seq[String]): Unit = { + checkColumns(sql(query).queryExecution.optimizedPlan, cols) + } + + protected val reusedDb: String = getClass.getSimpleName + protected val reusedDb2: String = getClass.getSimpleName + "2" + protected val reusedTable: String = reusedDb + "." 
+ getClass.getSimpleName + protected val reusedTableShort: String = reusedTable.split("\\.").last + protected val reusedPartTable: String = reusedTable + "_part" + protected val reusedPartTableShort: String = reusedPartTable.split("\\.").last + protected val functionCount = 3 + protected val functionNamePrefix = "kyuubi_fun_" + protected val tempFunNamePrefix = "kyuubi_temp_fun_" + + override def beforeAll(): Unit = { + sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb") + sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb2") + sql(s"CREATE TABLE IF NOT EXISTS $reusedTable" + + s" (key int, value string) USING parquet") + sql(s"CREATE TABLE IF NOT EXISTS $reusedPartTable" + + s" (key int, value string, pid string) USING parquet" + + s" PARTITIONED BY(pid)") + // scalastyle:off + (0 until functionCount).foreach { index => + { + sql(s"CREATE FUNCTION ${reusedDb}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'") + sql(s"CREATE FUNCTION ${reusedDb2}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'") + sql(s"CREATE TEMPORARY FUNCTION ${tempFunNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'") + } + } + sql(s"USE ${reusedDb2}") + // scalastyle:on + super.beforeAll() + } + + override def afterAll(): Unit = { + Seq(reusedTable, reusedPartTable).foreach { t => + sql(s"DROP TABLE IF EXISTS $t") + } + + Seq(reusedDb, reusedDb2).foreach { db => + (0 until functionCount).foreach { index => + sql(s"DROP FUNCTION ${db}.${functionNamePrefix}${index}") + } + sql(s"DROP DATABASE IF EXISTS ${db}") + } + + spark.stop() + super.afterAll() + } +} + +class HiveFunctionPrivilegesBuilderSuite extends FunctionPrivilegesBuilderSuite { + + override protected val catalogImpl: String = "hive" + + test("Function Call Query") { + val plan = sql(s"SELECT kyuubi_fun_1('data'), " + + s"kyuubi_fun_2(value), " + + s"${reusedDb}.kyuubi_fun_0(value), " + + s"kyuubi_temp_fun_1('data2')," + + s"kyuubi_temp_fun_2(key) " + + s"FROM $reusedTable").queryExecution.analyzed + val (inputs, _, _) = PrivilegesBuilder.buildFunctions(plan, spark) + assert(inputs.size === 3) + inputs.foreach { po => + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) + assert(po.dbname startsWith reusedDb.toLowerCase) + assert(po.objectName startsWith functionNamePrefix.toLowerCase) + val accessType = ranger.AccessType(po, QUERY, isInput = true) + assert(accessType === AccessType.SELECT) + } + } + + test("Function Call Query with Quoted Name") { + val plan = sql(s"SELECT `kyuubi_fun_1`('data'), " + + s"`kyuubi_fun_2`(value), " + + s"`${reusedDb}`.`kyuubi_fun_0`(value), " + + s"`kyuubi_temp_fun_1`('data2')," + + s"`kyuubi_temp_fun_2`(key) " + + s"FROM $reusedTable").queryExecution.analyzed + val (inputs, _, _) = PrivilegesBuilder.buildFunctions(plan, spark) + assert(inputs.size === 3) + inputs.foreach { po => + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) + assert(po.dbname startsWith reusedDb.toLowerCase) + assert(po.objectName startsWith functionNamePrefix.toLowerCase) + val accessType = ranger.AccessType(po, QUERY, isInput = true) + assert(accessType === AccessType.SELECT) + } + } + + test("Simple Function Call Query") { + val plan = sql(s"SELECT kyuubi_fun_1('data'), " + + s"kyuubi_fun_0('value'), " + + s"${reusedDb}.kyuubi_fun_0('value'), " + + s"${reusedDb}.kyuubi_fun_2('value'), " + + 
s"kyuubi_temp_fun_1('data2')," + + s"kyuubi_temp_fun_2('key') ").queryExecution.analyzed + val (inputs, _, _) = PrivilegesBuilder.buildFunctions(plan, spark) + assert(inputs.size === 4) + inputs.foreach { po => + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) + assert(po.dbname startsWith reusedDb.toLowerCase) + assert(po.objectName startsWith functionNamePrefix.toLowerCase) + val accessType = ranger.AccessType(po, QUERY, isInput = true) + assert(accessType === AccessType.SELECT) + } + } + + test("Function Call In CAST Command") { + val table = "castTable" + withTable(table) { table => + val plan = sql(s"CREATE TABLE ${table} " + + s"SELECT kyuubi_fun_1('data') col1, " + + s"${reusedDb2}.kyuubi_fun_2(value) col2, " + + s"kyuubi_fun_0(value) col3, " + + s"kyuubi_fun_2('value') col4, " + + s"${reusedDb}.kyuubi_fun_2('value') col5, " + + s"${reusedDb}.kyuubi_fun_1('value') col6, " + + s"kyuubi_temp_fun_1('data2') col7, " + + s"kyuubi_temp_fun_2(key) col8 " + + s"FROM ${reusedTable} WHERE ${reusedDb2}.kyuubi_fun_1(key)='123'").queryExecution.analyzed + val (inputs, _, _) = PrivilegesBuilder.buildFunctions(plan, spark) + assert(inputs.size === 7) + inputs.foreach { po => + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) + assert(po.dbname startsWith reusedDb.toLowerCase) + assert(po.objectName startsWith functionNamePrefix.toLowerCase) + val accessType = ranger.AccessType(po, QUERY, isInput = true) + assert(accessType === AccessType.SELECT) + } + } + } + +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/IcebergCatalogPrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/IcebergCatalogPrivilegesBuilderSuite.scala index d89d0696feb..45186e2502d 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/IcebergCatalogPrivilegesBuilderSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/IcebergCatalogPrivilegesBuilderSuite.scala @@ -22,11 +22,15 @@ import org.scalatest.Outcome import org.apache.kyuubi.Utils import org.apache.kyuubi.plugin.spark.authz.OperationType._ import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.tags.IcebergTest +import org.apache.kyuubi.util.AssertionUtils._ +@IcebergTest class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { override protected val catalogImpl: String = "hive" override protected val sqlExtensions: String = - if (isSparkV32OrGreater) { + if (isSparkV31OrGreater) { "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions" } else "" override protected def format = "iceberg" @@ -38,7 +42,7 @@ class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { override protected val supportsPartitionManagement = false override def beforeAll(): Unit = { - if (isSparkV32OrGreater) { + if (isSparkV31OrGreater) { spark.conf.set( s"spark.sql.catalog.$catalogV2", "org.apache.iceberg.spark.SparkCatalog") @@ -51,7 +55,7 @@ class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { } override def withFixture(test: NoArgTest): Outcome = { - assume(isSparkV32OrGreater) + assume(isSparkV31OrGreater) test() } @@ -64,8 +68,8 @@ class 
IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -81,8 +85,8 @@ class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -104,8 +108,8 @@ class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { val po0 = inputs.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname === namespace) - assert(po0.objectName === catalogTableShort) + assertEqualsIgnoreCase(namespace)(po0.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po0.objectName) assert(po0.columns === Seq("key", "value")) checkV2TableOwner(po0) @@ -113,12 +117,34 @@ class IcebergCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) assert(accessType === AccessType.UPDATE) } } + + test("RewriteDataFilesProcedure") { + val table = "RewriteDataFilesProcedure" + withV2Table(table) { tableId => + sql(s"CREATE TABLE IF NOT EXISTS $tableId (key int, value String) USING iceberg") + sql(s"INSERT INTO $tableId VALUES (1, 'a'), (2, 'b'), (3, 'c')") + + val plan = sql(s"CALL $catalogV2.system.rewrite_data_files (table => '$tableId')") + .queryExecution.analyzed + val (inputs, outputs, operationType) = PrivilegesBuilder.build(plan, spark) + assert(operationType === ALTERTABLE_PROPERTIES) + assert(inputs.size === 0) + assert(outputs.size === 1) + val po = outputs.head + assert(po.actionType === PrivilegeObjectActionType.OTHER) + assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) + val accessType = AccessType(po, operationType, isInput = false) + assert(accessType === AccessType.ALTER) + } + } } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala index b014aaaca28..723fabd7b67 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala +++ 
b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala @@ -30,9 +30,11 @@ import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} import org.scalatest.funsuite.AnyFunSuite import org.apache.kyuubi.plugin.spark.authz.OperationType._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils.isSparkVersionAtMost +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.AssertionUtils._ abstract class PrivilegesBuilderSuite extends AnyFunSuite with SparkSessionProvider with BeforeAndAfterAll with BeforeAndAfterEach { @@ -110,7 +112,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("AlterDatabasePropertiesCommand") { - assume(isSparkVersionAtMost("3.2")) + assume(SPARK_RUNTIME_VERSION <= "3.2") val plan = sql("ALTER DATABASE default SET DBPROPERTIES (abc = '123')").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) assertResult(plan.getClass.getName)( @@ -122,8 +124,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === "default") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(defaultDb)(po.objectName) assert(po.columns.isEmpty) } @@ -147,8 +149,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite out.foreach { po => assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(Set(oldTableShort, "efg").contains(po.objectName)) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertExistsIgnoreCase(po.objectName)(Set(oldTableShort, "efg")) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType == AccessType.ALTER) @@ -158,7 +160,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("CreateDatabaseCommand") { - assume(isSparkVersionAtMost("3.2")) + assume(SPARK_RUNTIME_VERSION <= "3.2") withDatabase("CreateDatabaseCommand") { db => val plan = sql(s"CREATE DATABASE $db").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) @@ -171,8 +173,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.isEmpty) - assert(po.dbname === "CreateDatabaseCommand") - assert(po.objectName === "CreateDatabaseCommand") + assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase(db)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -180,7 +182,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("DropDatabaseCommand") { - assume(isSparkVersionAtMost("3.2")) + assume(SPARK_RUNTIME_VERSION <= "3.2") withDatabase("DropDatabaseCommand") { db => sql(s"CREATE DATABASE $db") val plan = sql(s"DROP DATABASE DropDatabaseCommand").queryExecution.analyzed @@ -194,8 +196,8 @@ abstract class 
PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.isEmpty) - assert(po.dbname === "DropDatabaseCommand") - assert(po.objectName === "DropDatabaseCommand") + assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase(db)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.DROP) @@ -212,8 +214,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -230,8 +232,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -263,8 +265,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase tableName.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(tableName.split("\\.").last)(po.objectName) assert(po.columns.isEmpty) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -286,8 +288,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -309,8 +311,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === reusedDb) - assert(po.objectName === reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -331,8 +333,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - 
assert(po.dbname === reusedDb) - assert(po.objectName === reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns.isEmpty) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -350,8 +352,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po0.objectName) if (isSparkV32OrGreater) { // Query in AlterViewAsCommand can not be resolved before SPARK-34698 assert(po0.columns === Seq("key", "value", "pid")) @@ -365,8 +367,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === (if (isSparkV2) null else "default")) - assert(po.objectName === "AlterViewAsCommand") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("AlterViewAsCommand")(po.objectName) checkTableOwner(po) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -377,41 +379,62 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val plan = sql(s"ANALYZE TABLE $reusedPartTable PARTITION (pid=1)" + s" COMPUTE STATISTICS FOR COLUMNS key").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) - assert(operationType === ANALYZE_TABLE) + assert(operationType === ALTERTABLE_PROPERTIES) assert(in.size === 1) val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po0.objectName) // ignore this check as it behaves differently across spark versions assert(po0.columns === Seq("key")) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) - assert(accessType0 === AccessType.SELECT) + assert(accessType0 === AccessType.ALTER) + + assert(out.size === 1) + val po1 = out.head + assert(po1.actionType === PrivilegeObjectActionType.OTHER) + assert(po1.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assertEqualsIgnoreCase(reusedDb)(po1.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po1.objectName) + // ignore this check as it behaves differently across spark versions + assert(po1.columns.isEmpty) + checkTableOwner(po1) + val accessType1 = ranger.AccessType(po1, operationType, isInput = true) + assert(accessType1 === AccessType.ALTER) - assert(out.size === 0) } test("AnalyzePartitionCommand") { val plan = sql(s"ANALYZE TABLE $reusedPartTable" + s" PARTITION (pid = 1) COMPUTE STATISTICS").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) - assert(operationType === ANALYZE_TABLE) + assert(operationType === ALTERTABLE_PROPERTIES) assert(in.size === 1) val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === 
PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po0.objectName) // ignore this check as it behaves differently across spark versions assert(po0.columns === Seq("pid")) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) - assert(accessType0 === AccessType.SELECT) + assert(accessType0 === AccessType.ALTER) - assert(out.size === 0) + assert(out.size === 1) + val po1 = out.head + assert(po1.actionType === PrivilegeObjectActionType.OTHER) + assert(po1.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assertEqualsIgnoreCase(reusedDb)(po1.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po1.objectName) + // ignore this check as it behaves differently across spark versions + assert(po1.columns.isEmpty) + checkTableOwner(po1) + val accessType1 = ranger.AccessType(po1, operationType, isInput = true) + assert(accessType1 === AccessType.ALTER) } test("AnalyzeTableCommand") { @@ -419,20 +442,30 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite .queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) - assert(operationType === ANALYZE_TABLE) + assert(operationType === ALTERTABLE_PROPERTIES) assert(in.size === 1) val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po0.objectName) // ignore this check as it behaves differently across spark versions assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) - assert(accessType0 === AccessType.SELECT) + assert(accessType0 === AccessType.ALTER) - assert(out.size === 0) + assert(out.size === 1) + val po1 = out.head + assert(po1.actionType === PrivilegeObjectActionType.OTHER) + assert(po1.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assertEqualsIgnoreCase(reusedDb)(po1.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po1.objectName) + // ignore this check as it behaves differently across spark versions + assert(po1.columns.isEmpty) + checkTableOwner(po1) + val accessType1 = ranger.AccessType(po1, operationType, isInput = true) + assert(accessType1 === AccessType.ALTER) } test("AnalyzeTablesCommand") { @@ -445,8 +478,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.DATABASE) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedDb)(po0.objectName) // ignore this check as it behaves differently across spark versions assert(po0.columns.isEmpty) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -463,8 +496,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedDb) + 
assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedDb)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -482,8 +515,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) if (isSparkV32OrGreater) { assert(po0.columns.head === "key") checkTableOwner(po0) @@ -505,8 +538,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) if (isSparkV32OrGreater) { assert(po0.columns === Seq("key", "value")) checkTableOwner(po0) @@ -521,8 +554,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === (if (isSparkV2) null else "default")) - assert(po.objectName === "CreateViewCommand") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("CreateViewCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -541,8 +574,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === (if (isSparkV2) null else "default")) - assert(po.objectName === tableName) + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(tableName)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -588,9 +621,9 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) assert(po.catalog.isEmpty) - val db = if (isSparkV33OrGreater) "default" else null - assert(po.dbname === db) - assert(po.objectName === "CreateFunctionCommand") + val db = if (isSparkV33OrGreater) defaultDb else null + assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase("CreateFunctionCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -620,16 +653,16 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) assert(po.catalog.isEmpty) - val db = if (isSparkV33OrGreater) "default" else null - assert(po.dbname === db) - assert(po.objectName === "DropFunctionCommand") + val db = if (isSparkV33OrGreater) defaultDb else null + 
assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase("DropFunctionCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.DROP) } test("RefreshFunctionCommand") { - assume(AuthZUtils.isSparkVersionAtLeast("3.1")) + assume(isSparkV31OrGreater) sql(s"CREATE FUNCTION RefreshFunctionCommand AS '${getClass.getCanonicalName}'") val plan = sql("REFRESH FUNCTION RefreshFunctionCommand") .queryExecution.analyzed @@ -641,9 +674,9 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION) assert(po.catalog.isEmpty) - val db = if (isSparkV33OrGreater) "default" else null - assert(po.dbname === db) - assert(po.objectName === "RefreshFunctionCommand") + val db = if (isSparkV33OrGreater) defaultDb else null + assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase("RefreshFunctionCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.NONE) @@ -658,8 +691,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -670,8 +703,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === "CreateTableLikeCommand") + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase("CreateTableLikeCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -689,8 +722,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -701,8 +734,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === "CreateTableLikeCommandWithoutDatabase") + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase("CreateTableLikeCommandWithoutDatabase")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -727,8 +760,8 @@ abstract class 
PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns === Seq("key")) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -746,8 +779,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns.isEmpty) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -757,7 +790,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("DescribeDatabaseCommand") { - assume(isSparkVersionAtMost("3.2")) + assume(SPARK_RUNTIME_VERSION <= "3.2") val plan = sql(s"DESC DATABASE $reusedDb").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) assert(operationType === DESCDATABASE) @@ -766,8 +799,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedDb)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.USE) @@ -785,8 +818,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.DATABASE) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedDb)(po0.objectName) assert(po0.columns.isEmpty) val accessType0 = ranger.AccessType(po0, operationType, isInput = false) assert(accessType0 === AccessType.USE) @@ -808,8 +841,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -824,8 +857,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + 
assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -842,8 +875,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -860,8 +893,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -879,8 +912,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedPartTableShort) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedPartTableShort)(po0.objectName) assert(po0.columns === Seq("pid")) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -915,8 +948,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase tableName.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(tableName.split("\\.").last)(po.objectName) assert(po.columns.isEmpty) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -931,8 +964,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedTableShort) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns.take(2) === Seq("key", "value")) checkTableOwner(po) } @@ -956,7 +989,6 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("Query: CTE") { - assume(!isSparkV2) checkColumns( s""" |with t(c) as (select coalesce(max(key), pid, 1) from $reusedPartTable group by pid) @@ -1007,8 +1039,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith 
reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("value", "pid", "key"), s"$reusedPartTable both 'key', 'value' and 'pid' should be authenticated") @@ -1034,8 +1066,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("value", "key", "pid"), s"$reusedPartTable both 'key', 'value' and 'pid' should be authenticated") @@ -1064,8 +1096,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("key", "value"), s"$reusedPartTable 'key' is the join key and 'pid' is omitted") @@ -1093,8 +1125,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("key", "value"), s"$reusedPartTable both 'key' and 'value' should be authenticated") @@ -1123,8 +1155,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("key", "value"), s"$reusedPartTable both 'key' and 'value' should be authenticated") @@ -1149,8 +1181,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( po.columns === Seq("key", "value"), s"$reusedPartTable both 'key' and 'value' should be authenticated") @@ -1175,8 +1207,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName startsWith reusedTableShort.toLowerCase) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertStartsWithIgnoreCase(reusedTableShort)(po.objectName) assert( 
po.columns === Seq("key", "value", "pid"), s"$reusedPartTable both 'key', 'value' and 'pid' should be authenticated") @@ -1219,8 +1251,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === getClass.getSimpleName) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns.head === "a") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1228,7 +1260,6 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite } test("AlterTableChangeColumnCommand") { - assume(!isSparkV2) val plan = sql(s"ALTER TABLE $reusedTable" + s" ALTER COLUMN value COMMENT 'alter column'").queryExecution.analyzed val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) @@ -1239,8 +1270,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName === getClass.getSimpleName) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns.head === "value") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1253,7 +1284,7 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite { // some hive version does not support set database location test("AlterDatabaseSetLocationCommand") { - assume(isSparkVersionAtMost("3.2")) + assume(SPARK_RUNTIME_VERSION <= "3.2") val newLoc = spark.conf.get("spark.sql.warehouse.dir") + "/new_db_location" val plan = sql(s"ALTER DATABASE default SET LOCATION '$newLoc'") .queryExecution.analyzed @@ -1267,8 +1298,8 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === "default") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(defaultDb)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.ALTER) @@ -1284,8 +1315,8 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns === Seq("key", "value")) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -1296,8 +1327,8 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === (if (isSparkV2) null else "default")) - assert(po.objectName === "CreateDataSourceTableAsSelectCommand") + 
assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("CreateDataSourceTableAsSelectCommand")(po.objectName) if (catalogImpl == "hive") { assert(po.columns === Seq("key", "value")) } else { @@ -1310,10 +1341,9 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite { class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { - override protected val catalogImpl: String = if (isSparkV2) "in-memory" else "hive" + override protected val catalogImpl: String = "hive" test("AlterTableSerDePropertiesCommand") { - assume(!isSparkV2) withTable("AlterTableSerDePropertiesCommand") { t => sql(s"CREATE TABLE $t (key int, pid int) USING hive PARTITIONED BY (pid)") sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (pid=1)") @@ -1328,8 +1358,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === t) + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(t)(po.objectName) assert(po.columns.head === "pid") checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1338,7 +1368,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("CreateTableCommand") { - assume(!isSparkV2) withTable("CreateTableCommand") { _ => val plan = sql(s"CREATE TABLE CreateTableCommand(a int, b string) USING hive") .queryExecution.analyzed @@ -1350,8 +1379,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === "CreateTableCommand") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("CreateTableCommand")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -1359,7 +1388,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("CreateHiveTableAsSelectCommand") { - assume(!isSparkV2) val plan = sql(s"CREATE TABLE CreateHiveTableAsSelectCommand USING hive" + s" AS SELECT key, value FROM $reusedTable") .queryExecution.analyzed @@ -1370,8 +1398,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns === Seq("key", "value")) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -1382,15 +1410,14 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === "CreateHiveTableAsSelectCommand") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("CreateHiveTableAsSelectCommand")(po.objectName) assert(po.columns === 
Seq("key", "value")) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) } test("LoadDataCommand") { - assume(!isSparkV2) val dataPath = getClass.getClassLoader.getResource("data.txt").getPath val tableName = reusedDb + "." + "LoadDataToTable" withTable(tableName) { _ => @@ -1410,7 +1437,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = out.head assert(po0.actionType === PrivilegeObjectActionType.INSERT_OVERWRITE) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) assert(po0.objectName equalsIgnoreCase tableName.split("\\.").last) assert(po0.columns.isEmpty) checkTableOwner(po0) @@ -1420,7 +1447,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("InsertIntoDatasourceDirCommand") { - assume(!isSparkV2) val tableDirectory = getClass.getResource("/").getPath + "table_directory" val directory = File(tableDirectory).createDirectory() val plan = sql( @@ -1435,7 +1461,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) assert(po0.objectName equalsIgnoreCase reusedPartTable.split("\\.").last) assert(po0.columns === Seq("key", "value", "pid")) checkTableOwner(po0) @@ -1446,7 +1472,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("InsertIntoDataSourceCommand") { - assume(!isSparkV2) val tableName = "InsertIntoDataSourceTable" withTable(tableName) { _ => // sql(s"CREATE TABLE $tableName (a int, b string) USING parquet") @@ -1480,8 +1505,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns === Seq("key", "value")) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = true) @@ -1493,8 +1518,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.INSERT) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase "default") - assert(po.objectName equalsIgnoreCase tableName) + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(tableName)(po.objectName) assert(po.columns.isEmpty) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1505,7 +1530,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("InsertIntoHadoopFsRelationCommand") { - assume(!isSparkV2) val tableName = "InsertIntoHadoopFsRelationTable" withTable(tableName) { _ => sql(s"CREATE TABLE $tableName (a int, b string) USING parquet") @@ -1523,8 +1547,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === 
PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedTable.split("\\.").last) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po.objectName) assert(po.columns === Seq("key", "value")) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1536,8 +1560,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.INSERT) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase "default") - assert(po.objectName equalsIgnoreCase tableName) + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(tableName)(po.objectName) assert(po.columns === Seq("a", "b")) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1546,8 +1570,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } } - test("InsertIntoHiveDirCommand") { - assume(!isSparkV2) + test("InsertIntoDataSourceDirCommand") { val tableDirectory = getClass.getResource("/").getPath + "table_directory" val directory = File(tableDirectory).createDirectory() val plan = sql( @@ -1562,7 +1585,32 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assert(po0.objectName equalsIgnoreCase reusedPartTable.split("\\.").last) + assert(po0.columns === Seq("key", "value", "pid")) + checkTableOwner(po0) + val accessType0 = ranger.AccessType(po0, operationType, isInput = true) + assert(accessType0 === AccessType.SELECT) + + assert(out.isEmpty) + } + + test("InsertIntoHiveDirCommand") { + val tableDirectory = getClass.getResource("/").getPath + "table_directory" + val directory = File(tableDirectory).createDirectory() + val plan = sql( + s""" + |INSERT OVERWRITE DIRECTORY '$directory.path' + |ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' + |SELECT * FROM $reusedPartTable""".stripMargin) + .queryExecution.analyzed + val (in, out, operationType) = PrivilegesBuilder.build(plan, spark) + assert(operationType === QUERY) + assert(in.size === 1) + val po0 = in.head + assert(po0.actionType === PrivilegeObjectActionType.OTHER) + assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) assert(po0.objectName equalsIgnoreCase reusedPartTable.split("\\.").last) assert(po0.columns === Seq("key", "value", "pid")) checkTableOwner(po0) @@ -1573,7 +1621,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("InsertIntoHiveTableCommand") { - assume(!isSparkV2) val tableName = "InsertIntoHiveTable" withTable(tableName) { _ => sql(s"CREATE TABLE $tableName (a int, b string) USING hive") @@ -1592,8 +1639,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.INSERT) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname equalsIgnoreCase "default") - assert(po.objectName equalsIgnoreCase tableName) + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(tableName)(po.objectName) 
assert(po.columns === Seq("a", "b")) checkTableOwner(po) val accessType = ranger.AccessType(po, operationType, isInput = false) @@ -1603,7 +1650,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("ShowCreateTableAsSerdeCommand") { - assume(!isSparkV2) withTable("ShowCreateTableAsSerdeCommand") { t => sql(s"CREATE TABLE $t (key int, pid int) USING hive PARTITIONED BY (pid)") val plan = sql(s"SHOW CREATE TABLE $t AS SERDE").queryExecution.analyzed @@ -1613,8 +1659,8 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { val po0 = in.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.dbname === "default") - assert(po0.objectName === t) + assertEqualsIgnoreCase(defaultDb)(po0.dbname) + assertEqualsIgnoreCase(t)(po0.objectName) assert(po0.columns.isEmpty) checkTableOwner(po0) val accessType0 = ranger.AccessType(po0, operationType, isInput = true) @@ -1625,7 +1671,6 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { } test("OptimizedCreateHiveTableAsSelectCommand") { - assume(!isSparkV2) val plan = sql( s"CREATE TABLE OptimizedCreateHiveTableAsSelectCommand STORED AS parquet AS SELECT 1 as a") .queryExecution.analyzed @@ -1639,12 +1684,54 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) assert(po.catalog.isEmpty) - assert(po.dbname === "default") - assert(po.objectName === "OptimizedCreateHiveTableAsSelectCommand") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase("OptimizedCreateHiveTableAsSelectCommand")(po.objectName) assert(po.columns === Seq("a")) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) } + + test("KYUUBI #4532: Displays the columns involved in extracting the aggregation operator") { + // case1: There is no project operator involving all columns. + val plan1 = sql(s"SELECT COUNT(key), MAX(value) FROM $reusedPartTable GROUP BY pid") + .queryExecution.optimizedPlan + val (in1, out1, _) = PrivilegesBuilder.build(plan1, spark) + assert(in1.size === 1) + assert(out1.isEmpty) + val pi1 = in1.head + assert(pi1.columns.size === 3) + assert(pi1.columns === Seq("key", "value", "pid")) + + // case2: Some columns are involved, and the group column is not selected. + val plan2 = sql(s"SELECT COUNT(key) FROM $reusedPartTable GROUP BY pid") + .queryExecution.optimizedPlan + val (in2, out2, _) = PrivilegesBuilder.build(plan2, spark) + assert(in2.size === 1) + assert(out2.isEmpty) + val pi2 = in2.head + assert(pi2.columns.size === 2) + assert(pi2.columns === Seq("key", "pid")) + + // case3: Some columns are involved, and the group column is selected. 
+ val plan3 = sql(s"SELECT COUNT(key), pid FROM $reusedPartTable GROUP BY pid") + .queryExecution.optimizedPlan + val (in3, out3, _) = PrivilegesBuilder.build(plan3, spark) + assert(in3.size === 1) + assert(out3.isEmpty) + val pi3 = in3.head + assert(pi3.columns.size === 2) + assert(pi3.columns === Seq("key", "pid")) + + // case4: HAVING & GROUP clause + val plan4 = sql(s"SELECT COUNT(key) FROM $reusedPartTable GROUP BY pid HAVING MAX(key) > 1000") + .queryExecution.optimizedPlan + val (in4, out4, _) = PrivilegesBuilder.build(plan4, spark) + assert(in4.size === 1) + assert(out4.isEmpty) + val pi4 = in4.head + assert(pi4.columns.size === 2) + assert(pi4.columns === Seq("key", "pid")) + } } case class SimpleInsert(userSpecifiedSchema: StructType)(@transient val sparkSession: SparkSession) diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala new file mode 100644 index 00000000000..2297f73f9c4 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.plugin.spark.authz + +object RangerTestUsers { + // authorized users used in policy generation + val admin = "admin" + val alice = "alice" + val bob = "bob" + val kent = "kent" + val permViewUser = "perm_view_user" + val ownerPlaceHolder = "{OWNER}" + val createOnlyUser = "create_only_user" + val defaultTableOwner = "default_table_owner" + val permViewOnlyUser = "user_perm_view_only" + + // non-authorized users + val invisibleUser = "i_am_invisible" + val denyUser = "denyuser" + val denyUser2 = "denyuser2" + val someone = "someone" +} + +object RangerTestNamespace { + val defaultDb = "default" + val sparkCatalog = "spark_catalog" + val icebergNamespace = "iceberg_ns" + val namespace1 = "ns1" + val namespace2 = "ns2" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala index a1f2d71971f..e6f70b4d1a6 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala @@ -22,29 +22,26 @@ import java.security.PrivilegedExceptionAction import org.apache.hadoop.security.UserGroupInformation import org.apache.spark.SparkConf -import org.apache.spark.sql.{DataFrame, SparkSession, SparkSessionExtensions} +import org.apache.spark.sql.{DataFrame, Row, SparkSession, SparkSessionExtensions} +import org.scalatest.Assertions._ import org.apache.kyuubi.Utils +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ trait SparkSessionProvider { protected val catalogImpl: String protected def format: String = if (catalogImpl == "hive") "hive" else "parquet" - protected val isSparkV2: Boolean = isSparkVersionAtMost("2.4") - protected val isSparkV31OrGreater: Boolean = isSparkVersionAtLeast("3.1") - protected val isSparkV32OrGreater: Boolean = isSparkVersionAtLeast("3.2") - protected val isSparkV33OrGreater: Boolean = isSparkVersionAtLeast("3.3") - protected val extension: SparkSessionExtensions => Unit = _ => Unit + protected val extension: SparkSessionExtensions => Unit = _ => () protected val sqlExtensions: String = "" - protected val defaultTableOwner = "default_table_owner" protected val extraSparkConf: SparkConf = new SparkConf() protected lazy val spark: SparkSession = { val metastore = { val path = Utils.createTempDir(prefix = "hms") - Files.delete(path) + Files.deleteIfExists(path) path } val ret = SparkSession.builder() @@ -82,12 +79,12 @@ trait SparkSessionProvider { f } finally { res.foreach { - case (t, "table") => doAs("admin", sql(s"DROP TABLE IF EXISTS $t")) - case (db, "database") => doAs("admin", sql(s"DROP DATABASE IF EXISTS $db")) - case (fn, "function") => doAs("admin", sql(s"DROP FUNCTION IF EXISTS $fn")) - case (view, "view") => doAs("admin", sql(s"DROP VIEW IF EXISTS $view")) + case (t, "table") => doAs(admin, sql(s"DROP TABLE IF EXISTS $t")) + case (db, "database") => doAs(admin, sql(s"DROP DATABASE IF EXISTS $db")) + case (fn, "function") => doAs(admin, sql(s"DROP FUNCTION IF EXISTS $fn")) + case (view, "view") => doAs(admin, sql(s"DROP VIEW IF EXISTS $view")) case (cacheTable, "cache") => if (isSparkV32OrGreater) { - doAs("admin", sql(s"UNCACHE TABLE IF EXISTS $cacheTable")) + doAs(admin, sql(s"UNCACHE TABLE IF 
EXISTS $cacheTable")) } case (_, e) => throw new RuntimeException(s"the resource whose resource type is $e cannot be cleared") @@ -95,4 +92,8 @@ trait SparkSessionProvider { } } + protected def checkAnswer(user: String, query: String, result: Seq[Row]): Unit = { + doAs(user, assert(sql(query).collect() === result)) + } + } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala index dede8142693..3ebea1ce9d9 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala @@ -23,8 +23,11 @@ import org.apache.hadoop.security.UserGroupInformation import org.apache.spark.sql.execution.QueryExecution import org.apache.kyuubi.plugin.spark.authz.OperationType._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType import org.apache.kyuubi.plugin.spark.authz.serde.{Database, DB_COMMAND_SPECS} +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.AssertionUtils._ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { @@ -99,9 +102,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) assert(po.owner.isEmpty) val accessType = AccessType(po, operationType, isInput = false) @@ -121,9 +124,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po0 = inputs.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.catalog === None) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTableShort) + assert(po0.catalog.isEmpty) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.take(2) === Seq("key", "value")) checkTableOwner(po0) @@ -131,9 +134,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) assert(po.owner.isEmpty) val accessType = AccessType(po, operationType, isInput = false) @@ -154,9 +157,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === 
Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) assert(po.owner.isEmpty) val accessType = AccessType(po, operationType, isInput = false) @@ -176,9 +179,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po0 = inputs.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.catalog === None) - assert(po0.dbname equalsIgnoreCase reusedDb) - assert(po0.objectName equalsIgnoreCase reusedTableShort) + assert(po0.catalog.isEmpty) + assertEqualsIgnoreCase(reusedDb)(po0.dbname) + assertEqualsIgnoreCase(reusedTableShort)(po0.objectName) assert(po0.columns.take(2) === Seq("key", "value")) checkTableOwner(po0) @@ -186,9 +189,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) assert(po.owner.isEmpty) val accessType = AccessType(po, operationType, isInput = false) @@ -207,9 +210,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.INSERT) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -229,9 +232,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -249,9 +252,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -267,9 +270,9 @@ abstract class V2CommandsPrivilegesSuite extends 
PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.INSERT_OVERWRITE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -290,9 +293,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.INSERT_OVERWRITE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -315,9 +318,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -337,9 +340,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -359,9 +362,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -382,9 +385,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + 
assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -403,9 +406,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -425,9 +428,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -452,9 +455,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po0 = inputs.head assert(po0.actionType === PrivilegeObjectActionType.OTHER) assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po0.catalog === Some(catalogV2)) - assert(po0.dbname === namespace) - assert(po0.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po0.catalog) + assertEqualsIgnoreCase(namespace)(po0.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po0.objectName) assert(po0.columns === Seq("key", "value")) checkV2TableOwner(po0) @@ -462,9 +465,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.UPDATE) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -485,9 +488,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogPartTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogPartTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -506,9 +509,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === 
PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -523,9 +526,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = inputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === catalogTableShort) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(catalogTableShort)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = true) @@ -550,9 +553,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -575,9 +578,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -600,9 +603,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -625,9 +628,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, 
operationType, isInput = false) @@ -650,9 +653,9 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val po = outputs.head assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW) - assert(po.catalog === Some(catalogV2)) - assert(po.dbname === namespace) - assert(po.objectName === table) + assertEqualsIgnoreCase(Some(catalogV2))(po.catalog) + assertEqualsIgnoreCase(namespace)(po.dbname) + assertEqualsIgnoreCase(table)(po.objectName) assert(po.columns.isEmpty) checkV2TableOwner(po) val accessType = AccessType(po, operationType, isInput = false) @@ -667,7 +670,7 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { val spec = DB_COMMAND_SPECS(plan1.getClass.getName) var db: Database = null spec.databaseDescs.find { d => - Try(db = d.extract(plan1)).isSuccess + Try { db = d.extract(plan1) }.isSuccess } withClue(sql1) { assert(db.catalog === None) @@ -688,8 +691,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.get === sparkSessionCatalogName) - assert(po.dbname === "default") - assert(po.objectName === "default") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(defaultDb)(po.objectName) assert(po.columns.isEmpty) } @@ -707,8 +710,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.get === sparkSessionCatalogName) - assert(po.dbname === "CreateNamespace") - assert(po.objectName === "CreateNamespace") + assertEqualsIgnoreCase("CreateNamespace")(po.dbname) + assertEqualsIgnoreCase("CreateNamespace")(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.CREATE) @@ -732,8 +735,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.get === sparkSessionCatalogName) - assert(po.dbname === "default") - assert(po.objectName === "default") + assertEqualsIgnoreCase(defaultDb)(po.dbname) + assertEqualsIgnoreCase(defaultDb)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.ALTER) @@ -751,8 +754,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.get === sparkSessionCatalogName) - assert(po.dbname equalsIgnoreCase reusedDb) - assert(po.objectName equalsIgnoreCase reusedDb) + assertEqualsIgnoreCase(reusedDb)(po.dbname) + assertEqualsIgnoreCase(reusedDb)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.USE) @@ -775,8 +778,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite { assert(po.actionType === PrivilegeObjectActionType.OTHER) assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE) assert(po.catalog.get === sparkSessionCatalogName) - assert(po.dbname === "DropNameSpace") - assert(po.objectName === 
"DropNameSpace") + assertEqualsIgnoreCase(db)(po.dbname) + assertEqualsIgnoreCase(db)(po.objectName) assert(po.columns.isEmpty) val accessType = ranger.AccessType(po, operationType, isInput = false) assert(accessType === AccessType.DROP) diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2JdbcTableCatalogPrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2JdbcTableCatalogPrivilegesBuilderSuite.scala index f85689406dc..1037d9811ee 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2JdbcTableCatalogPrivilegesBuilderSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2JdbcTableCatalogPrivilegesBuilderSuite.scala @@ -23,6 +23,8 @@ import scala.util.Try import org.scalatest.Outcome import org.apache.kyuubi.plugin.spark.authz.serde._ +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.AssertionUtils._ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite { override protected val catalogImpl: String = "in-memory" @@ -77,12 +79,12 @@ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite val spec = TABLE_COMMAND_SPECS(plan.getClass.getName) var table: Table = null spec.tableDescs.find { d => - Try(table = d.extract(plan, spark).get).isSuccess + Try { table = d.extract(plan, spark).get }.isSuccess } withClue(str) { - assert(table.catalog === Some(catalogV2)) - assert(table.database === Some(ns1)) - assert(table.table === tbl) + assertEqualsIgnoreCase(Some(catalogV2))(table.catalog) + assertEqualsIgnoreCase(Some(ns1))(table.database) + assertEqualsIgnoreCase(tbl)(table.table) assert(table.owner.isEmpty) } } @@ -102,12 +104,12 @@ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite val spec = TABLE_COMMAND_SPECS(plan.getClass.getName) var table: Table = null spec.tableDescs.find { d => - Try(table = d.extract(plan, spark).get).isSuccess + Try { table = d.extract(plan, spark).get }.isSuccess } withClue(sql1) { - assert(table.catalog === Some(catalogV2)) - assert(table.database === Some(ns1)) - assert(table.table === tbl) + assertEqualsIgnoreCase(Some(catalogV2))(table.catalog) + assertEqualsIgnoreCase(Some(ns1))(table.database) + assertEqualsIgnoreCase(tbl)(table.table) assert(table.owner.isEmpty) } } @@ -125,11 +127,11 @@ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite val plan = executePlan(sql1).analyzed val spec = TABLE_COMMAND_SPECS(plan.getClass.getName) var table: Table = null - spec.tableDescs.find { d => Try(table = d.extract(plan, spark).get).isSuccess } + spec.tableDescs.find { d => Try { table = d.extract(plan, spark).get }.isSuccess } withClue(sql1) { - assert(table.catalog === Some(catalogV2)) - assert(table.database === Some(ns1)) - assert(table.table === tbl) + assertEqualsIgnoreCase(Some(catalogV2))(table.catalog) + assertEqualsIgnoreCase(Some(ns1))(table.database) + assertEqualsIgnoreCase(tbl)(table.table) assert(table.owner.isEmpty) } } @@ -144,11 +146,11 @@ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite val spec = DB_COMMAND_SPECS(plan.getClass.getName) var db: Database = null spec.databaseDescs.find { d => - Try(db = d.extract(plan)).isSuccess + Try { db = d.extract(plan) }.isSuccess } withClue(sql) { - assert(db.catalog === Some(catalogV2)) - assert(db.database === ns1) + 
assertEqualsIgnoreCase(Some(catalogV2))(db.catalog) + assertEqualsIgnoreCase(ns1)(db.database) } } @@ -163,11 +165,11 @@ class V2JdbcTableCatalogPrivilegesBuilderSuite extends V2CommandsPrivilegesSuite val spec = DB_COMMAND_SPECS(plan.getClass.getName) var db: Database = null spec.databaseDescs.find { d => - Try(db = d.extract(plan)).isSuccess + Try { db = d.extract(plan) }.isSuccess } withClue(sql1) { - assert(db.catalog === Some(catalogV2)) - assert(db.database === ns1) + assertEqualsIgnoreCase(Some(catalogV2))(db.catalog) + assertEqualsIgnoreCase(ns1)(db.database) } } } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/DatabaseCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/DatabaseCommands.scala index e947579e9f7..a61c142edb5 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/DatabaseCommands.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/DatabaseCommands.scala @@ -58,9 +58,10 @@ object DatabaseCommands { "namespace", classOf[StringSeqDatabaseExtractor], catalogDesc = Some(CatalogDesc())) + val databaseDesc3 = DatabaseDesc("name", classOf[ResolvedNamespaceDatabaseExtractor]) DatabaseCommandSpec( "org.apache.spark.sql.catalyst.plans.logical.CreateNamespace", - Seq(databaseDesc1, databaseDesc2), + Seq(databaseDesc1, databaseDesc2, databaseDesc3), CREATEDATABASE) } @@ -97,12 +98,12 @@ object DatabaseCommands { val SetCatalogAndNamespace = { val cmd = "org.apache.spark.sql.catalyst.plans.logical.SetCatalogAndNamespace" - val databaseDesc1 = + val resolvedDbObjectDatabaseDesc = DatabaseDesc( "child", classOf[ResolvedDBObjectNameDatabaseExtractor], isInput = true) - val databaseDesc2 = + val stringSeqOptionDatabaseDesc = DatabaseDesc( "namespace", classOf[StringSeqOptionDatabaseExtractor], @@ -110,7 +111,15 @@ object DatabaseCommands { fieldName = "catalogName", fieldExtractor = classOf[StringOptionCatalogExtractor])), isInput = true) - DatabaseCommandSpec(cmd, Seq(databaseDesc1, databaseDesc2), SWITCHDATABASE) + val resolvedNamespaceDatabaseDesc = + DatabaseDesc( + "child", + classOf[ResolvedNamespaceDatabaseExtractor], + isInput = true) + DatabaseCommandSpec( + cmd, + Seq(resolvedNamespaceDatabaseDesc, resolvedDbObjectDatabaseDesc, stringSeqOptionDatabaseDesc), + SWITCHDATABASE) } val SetNamespace = { diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/FunctionCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/FunctionCommands.scala index 46c7f0efac5..1822e80fc8a 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/FunctionCommands.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/FunctionCommands.scala @@ -35,8 +35,12 @@ object FunctionCommands { "functionName", classOf[StringFunctionExtractor], Some(databaseDesc), - Some(functionTypeDesc)) - FunctionCommandSpec(cmd, Seq(functionDesc), CREATEFUNCTION) + functionTypeDesc = Some(functionTypeDesc)) + val functionIdentifierDesc = FunctionDesc( + "identifier", + classOf[FunctionIdentifierFunctionExtractor], + functionTypeDesc = Some(functionTypeDesc)) + FunctionCommandSpec(cmd, Seq(functionIdentifierDesc, functionDesc), CREATEFUNCTION) } val DescribeFunction = { diff --git 
a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/IcebergCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/IcebergCommands.scala index 208e73c51b3..355143c402c 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/IcebergCommands.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/IcebergCommands.scala @@ -17,6 +17,7 @@ package org.apache.kyuubi.plugin.spark.authz.gen +import org.apache.kyuubi.plugin.spark.authz.OperationType import org.apache.kyuubi.plugin.spark.authz.PrivilegeObjectActionType._ import org.apache.kyuubi.plugin.spark.authz.serde._ @@ -49,7 +50,14 @@ object IcebergCommands { TableCommandSpec(cmd, Seq(tableDesc), queryDescs = Seq(queryDesc)) } + val CallProcedure = { + val cmd = "org.apache.spark.sql.catalyst.plans.logical.Call" + val td = TableDesc("args", classOf[ExpressionSeqTableExtractor]) + TableCommandSpec(cmd, Seq(td), opType = OperationType.ALTERTABLE_PROPERTIES) + } + val data: Array[TableCommandSpec] = Array( + CallProcedure, DeleteFromIcebergTable, UpdateIcebergTable, MergeIntoIcebergTable, diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala index 7c7ed138b27..855e25e87ea 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala @@ -18,37 +18,62 @@ package org.apache.kyuubi.plugin.spark.authz.gen import java.nio.charset.StandardCharsets -import java.nio.file.{Files, Paths} +import java.nio.file.{Files, Paths, StandardOpenOption} + +//scalastyle:off +import org.scalatest.funsuite.AnyFunSuite import org.apache.kyuubi.plugin.spark.authz.serde.{mapper, CommandSpec} +import org.apache.kyuubi.util.AssertionUtils._ /** * Generates the default command specs to src/main/resources dir. 
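For context, the command specs this generator writes out are consumed by looking them up by the logical plan's class name and probing each descriptor until one extracts successfully — the same `Try { table = d.extract(plan, spark).get }` probing the V2 JDBC catalog tests earlier in this patch use. The sketch below condenses that lookup; it assumes `TABLE_COMMAND_SPECS` supports map-style access keyed by class name, as the suites' usage suggests, and is illustrative rather than part of the patch.

```scala
import scala.util.Try

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

import org.apache.kyuubi.plugin.spark.authz.serde.{Table, TABLE_COMMAND_SPECS}

// Sketch only: resolve the table a command plan touches via its generated spec.
// Descriptors are tried in order; the first successful extraction wins.
def tableOf(plan: LogicalPlan, spark: SparkSession): Option[Table] =
  TABLE_COMMAND_SPECS.get(plan.getClass.getName).flatMap { spec =>
    var resolved: Option[Table] = None
    spec.tableDescs.find { d =>
      resolved = Try(d.extract(plan, spark)).toOption.flatten
      resolved.isDefined
    }
    resolved
  }
```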
* - * Usage: - * mvn scala:run -DmainClass=this class -pl :kyuubi-spark-authz_2.12 + * To run the test suite: + * {{{ + * KYUUBI_UPDATE=0 dev/gen/gen_ranger_spec_json.sh + * }}} + * + * To regenerate the ranger policy file: + * {{{ + * dev/gen/gen_ranger_spec_json.sh + * }}} */ -object JsonSpecFileGenerator { - - def main(args: Array[String]): Unit = { +class JsonSpecFileGenerator extends AnyFunSuite { + // scalastyle:on + test("check spec json files") { writeCommandSpecJson("database", DatabaseCommands.data) writeCommandSpecJson("table", TableCommands.data ++ IcebergCommands.data) writeCommandSpecJson("function", FunctionCommands.data) writeCommandSpecJson("scan", Scans.data) } - def writeCommandSpecJson[T <: CommandSpec](commandType: String, specArr: Array[T]): Unit = { + def writeCommandSpecJson[T <: CommandSpec]( + commandType: String, + specArr: Array[T]): Unit = { val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath .split("target").head val filename = s"${commandType}_command_spec.json" - val writer = { - val p = Paths.get(pluginHome, "src", "main", "resources", filename) - Files.newBufferedWriter(p, StandardCharsets.UTF_8) + val filePath = Paths.get(pluginHome, "src", "main", "resources", filename) + + val generatedStr = mapper.writerWithDefaultPrettyPrinter() + .writeValueAsString(specArr.sortBy(_.classname)) + + if (sys.env.get("KYUUBI_UPDATE").contains("1")) { + // scalastyle:off println + println(s"writing ${specArr.length} specs to $filename") + // scalastyle:on println + Files.write( + filePath, + generatedStr.getBytes(StandardCharsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING) + } else { + assertFileContent( + filePath, + Seq(generatedStr), + "dev/gen/gen_ranger_spec_json.sh", + splitFirstExpectedLine = true) } - // scalastyle:off println - println(s"writing ${specArr.length} specs to $filename") - // scalastyle:on println - mapper.writerWithDefaultPrettyPrinter().writeValue(writer, specArr.sortBy(_.classname)) - writer.close() } } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala index 7bd8260bba5..b2c1868a26d 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala @@ -18,6 +18,7 @@ package org.apache.kyuubi.plugin.spark.authz.gen import org.apache.kyuubi.plugin.spark.authz.serde._ +import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType._ object Scans { @@ -57,9 +58,34 @@ object Scans { ScanSpec(r, Seq(tableDesc)) } + val HiveSimpleUDF = { + ScanSpec( + "org.apache.spark.sql.hive.HiveSimpleUDF", + Seq.empty, + Seq(FunctionDesc( + "name", + classOf[QualifiedNameStringFunctionExtractor], + functionTypeDesc = Some(FunctionTypeDesc( + "name", + classOf[FunctionNameFunctionTypeExtractor], + Seq(TEMP, SYSTEM))), + isInput = true))) + } + + val HiveGenericUDF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDF") + + val HiveUDAFFunction = HiveSimpleUDF.copy(classname = + "org.apache.spark.sql.hive.HiveUDAFFunction") + + val HiveGenericUDTF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDTF") + val data: Array[ScanSpec] = Array( HiveTableRelation, LogicalRelation, DataSourceV2Relation, - PermanentViewMarker) + 
PermanentViewMarker, + HiveSimpleUDF, + HiveGenericUDF, + HiveUDAFFunction, + HiveGenericUDTF) } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala index a8b8121e2b0..ca2ee92948e 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala @@ -30,6 +30,8 @@ object TableCommands { val resolvedTableDesc = TableDesc("child", classOf[ResolvedTableTableExtractor]) val resolvedDbObjectNameDesc = TableDesc("child", classOf[ResolvedDbObjectNameTableExtractor]) + val resolvedIdentifierTableDesc = + TableDesc("child", classOf[ResolvedIdentifierTableExtractor]) val overwriteActionTypeDesc = ActionTypeDesc("overwrite", classOf[OverwriteOrInsertActionTypeExtractor]) val queryQueryDesc = QueryDesc("query") @@ -179,7 +181,8 @@ object TableCommands { val cd2 = cd1.copy(fieldExtractor = classOf[StringSeqOptionColumnExtractor]) val td1 = tableIdentDesc.copy(columnDesc = Some(cd1), isInput = true) val td2 = td1.copy(columnDesc = Some(cd2)) - TableCommandSpec(cmd, Seq(td1, td2), ANALYZE_TABLE) + // AnalyzeColumn will update table properties, here we use ALTERTABLE_PROPERTIES + TableCommandSpec(cmd, Seq(tableIdentDesc, td1, td2), ALTERTABLE_PROPERTIES) } val AnalyzePartition = { @@ -187,16 +190,18 @@ object TableCommands { val columnDesc = ColumnDesc("partitionSpec", classOf[PartitionColumnExtractor]) TableCommandSpec( cmd, - Seq(tableIdentDesc.copy(columnDesc = Some(columnDesc), isInput = true)), - ANALYZE_TABLE) + // AnalyzePartition will update table properties, here we use ALTERTABLE_PROPERTIES + Seq(tableIdentDesc, tableIdentDesc.copy(columnDesc = Some(columnDesc), isInput = true)), + ALTERTABLE_PROPERTIES) } val AnalyzeTable = { val cmd = "org.apache.spark.sql.execution.command.AnalyzeTableCommand" TableCommandSpec( cmd, - Seq(tableIdentDesc.copy(isInput = true)), - ANALYZE_TABLE) + // AnalyzeTable will update table properties, here we use ALTERTABLE_PROPERTIES + Seq(tableIdentDesc, tableIdentDesc.copy(isInput = true)), + ALTERTABLE_PROPERTIES) } val CreateTableV2 = { @@ -205,7 +210,10 @@ object TableCommands { "tableName", classOf[IdentifierTableExtractor], catalogDesc = Some(CatalogDesc())) - TableCommandSpec(cmd, Seq(tableDesc, resolvedDbObjectNameDesc), CREATETABLE) + TableCommandSpec( + cmd, + Seq(resolvedIdentifierTableDesc, tableDesc, resolvedDbObjectNameDesc), + CREATETABLE) } val CreateV2Table = { @@ -225,7 +233,10 @@ object TableCommands { catalogDesc = Some(CatalogDesc())) TableCommandSpec( cmd, - Seq(tableDesc, resolvedDbObjectNameDesc.copy(fieldName = "left")), + Seq( + resolvedIdentifierTableDesc.copy(fieldName = "left"), + tableDesc, + resolvedDbObjectNameDesc.copy(fieldName = "left")), CREATETABLE_AS_SELECT, Seq(queryQueryDesc)) } @@ -438,8 +449,7 @@ object TableCommands { val DropTableV2 = { val cmd = "org.apache.spark.sql.catalyst.plans.logical.DropTable" - val tableDesc1 = resolvedTableDesc - TableCommandSpec(cmd, Seq(tableDesc1), DROPTABLE) + TableCommandSpec(cmd, Seq(resolvedIdentifierTableDesc, resolvedTableDesc), DROPTABLE) } val MergeIntoTable = { @@ -600,8 +610,6 @@ object TableCommands { AnalyzeColumn, AnalyzePartition, AnalyzeTable, - AnalyzeTable.copy(classname = - 
"org.apache.spark.sql.execution.command.AnalyzeTablesCommand"), AppendDataV2, CacheTable, CacheTableAsSelect, @@ -637,7 +645,7 @@ object TableCommands { "org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand"), InsertIntoHadoopFsRelationCommand, InsertIntoDataSourceDir.copy(classname = - "org.apache.spark.sql.execution.datasources.InsertIntoHiveDirCommand"), + "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand"), InsertIntoHiveTable, LoadData, MergeIntoTable, diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala index 909c26d36df..55fde3b685b 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala @@ -19,27 +19,40 @@ package org.apache.kyuubi.plugin.spark.authz.ranger // scalastyle:off import scala.util.Try +import org.scalatest.Outcome + import org.apache.kyuubi.Utils import org.apache.kyuubi.plugin.spark.authz.AccessControlException +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.tags.IcebergTest +import org.apache.kyuubi.util.AssertionUtils._ /** * Tests for RangerSparkExtensionSuite * on Iceberg catalog with DataSource V2 API. */ +@IcebergTest class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { override protected val catalogImpl: String = "hive" override protected val sqlExtensions: String = - if (isSparkV32OrGreater) + if (isSparkV31OrGreater) "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions" else "" val catalogV2 = "local" - val namespace1 = "iceberg_ns" + val namespace1 = icebergNamespace val table1 = "table1" val outputTable1 = "outputTable1" + override def withFixture(test: NoArgTest): Outcome = { + assume(isSparkV31OrGreater) + test() + } + override def beforeAll(): Unit = { - if (isSparkV32OrGreater) { + if (isSparkV31OrGreater) { spark.conf.set( s"spark.sql.catalog.$catalogV2", "org.apache.iceberg.spark.SparkCatalog") @@ -50,18 +63,18 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite super.beforeAll() - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table1" + " (id int, name string, city string) USING iceberg")) doAs( - "admin", + admin, sql(s"INSERT INTO $catalogV2.$namespace1.$table1" + " (id , name , city ) VALUES (1, 'liangbowen','Guangzhou')")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable1" + " (id int, name string, city string) USING iceberg")) } @@ -74,8 +87,6 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite } test("[KYUUBI #3515] MERGE INTO") { - assume(isSparkV32OrGreater) - val mergeIntoSql = s""" |MERGE INTO $catalogV2.$namespace1.$outputTable1 AS target @@ -88,65 +99,52 @@ class IcebergCatalogRangerSparkExtensionSuite extends 
RangerSparkExtensionSuite // MergeIntoTable: Using a MERGE INTO Statement val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(mergeIntoSql))) assert(e1.getMessage.contains(s"does not have [select] privilege" + s" on [$namespace1/$table1/id]")) - try { - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - true) + withSingleCallEnabled { val e2 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(mergeIntoSql))) assert(e2.getMessage.contains(s"does not have" + s" [select] privilege" + s" on [$namespace1/$table1/id,$namespace1/table1/name,$namespace1/$table1/city]," + s" [update] privilege on [$namespace1/$outputTable1]")) - } finally { - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - false) } - doAs("admin", sql(mergeIntoSql)) + doAs(admin, sql(mergeIntoSql)) } test("[KYUUBI #3515] UPDATE TABLE") { - assume(isSparkV32OrGreater) - // UpdateTable val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Guangzhou' " + " WHERE id=1"))) assert(e1.getMessage.contains(s"does not have [update] privilege" + s" on [$namespace1/$table1]")) doAs( - "admin", + admin, sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Guangzhou' " + " WHERE id=1")) } test("[KYUUBI #3515] DELETE FROM TABLE") { - assume(isSparkV32OrGreater) - // DeleteFromTable val e6 = intercept[AccessControlException]( - doAs("someone", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2"))) + doAs(someone, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2"))) assert(e6.getMessage.contains(s"does not have [update] privilege" + s" on [$namespace1/$table1]")) - doAs("admin", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2")) + doAs(admin, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2")) } test("[KYUUBI #3666] Support {OWNER} variable for queries run on CatalogV2") { - assume(isSparkV32OrGreater) - val table = "owner_variable" val select = s"SELECT key FROM $catalogV2.$namespace1.$table" @@ -164,7 +162,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite }.isSuccess)) doAs( - "create_only_user", { + createOnlyUser, { val e = intercept[AccessControlException](sql(select).collect()) assert(e.getMessage === errorMessage("select", s"$namespace1/$table/key")) }) @@ -179,17 +177,17 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite (s"$catalogV2.default.src", "table"), (s"$catalogV2.default.outputTable2", "table"))) { doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.default.src" + " (id int, name string, key string) USING iceberg")) doAs( - "admin", + admin, sql(s"INSERT INTO $catalogV2.default.src" + " (id , name , key ) VALUES " + "(1, 'liangbowen1','10')" + ", (2, 'liangbowen2','20')")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable2" + " (id int, name string, key string) USING iceberg")) @@ -201,20 +199,20 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite |WHEN NOT MATCHED THEN INSERT (id, name, key) VALUES (source.id, source.name, source.key) """.stripMargin - doAs("admin", sql(mergeIntoSql)) + doAs(admin, sql(mergeIntoSql)) doAs( - "admin", { + admin, { val countOutputTable = sql(s"select count(1) from 
$catalogV2.$namespace1.$outputTable2").collect() val rowCount = countOutputTable(0).get(0) assert(rowCount === 2) }) - doAs("admin", sql(s"truncate table $catalogV2.$namespace1.$outputTable2")) + doAs(admin, sql(s"truncate table $catalogV2.$namespace1.$outputTable2")) // source table with row filter `key`<20 - doAs("bob", sql(mergeIntoSql)) + doAs(bob, sql(mergeIntoSql)) doAs( - "admin", { + admin, { val countOutputTable = sql(s"select count(1) from $catalogV2.$namespace1.$outputTable2").collect() val rowCount = countOutputTable(0).get(0) @@ -224,11 +222,68 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite } test("[KYUUBI #4255] DESCRIBE TABLE") { - assume(isSparkV32OrGreater) val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain())) + doAs(someone, sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain())) assert(e1.getMessage.contains(s"does not have [select] privilege" + s" on [$namespace1/$table1]")) } + test("CALL RewriteDataFilesProcedure") { + val tableName = "table_select_call_command_table" + val table = s"$catalogV2.$namespace1.$tableName" + val initDataFilesCount = 2 + val rewriteDataFiles1 = s"CALL $catalogV2.system.rewrite_data_files " + + s"(table => '$table', options => map('min-input-files','$initDataFilesCount'))" + val rewriteDataFiles2 = s"CALL $catalogV2.system.rewrite_data_files " + + s"(table => '$table', options => map('min-input-files','${initDataFilesCount + 1}'))" + + withCleanTmpResources(Seq((table, "table"))) { + doAs( + admin, { + sql(s"CREATE TABLE IF NOT EXISTS $table (id int, name string) USING iceberg") + // insert 2 data files + (0 until initDataFilesCount) + .foreach(i => sql(s"INSERT INTO $table VALUES ($i, 'user_$i')")) + }) + + interceptContains[AccessControlException](doAs(someone, sql(rewriteDataFiles1)))( + s"does not have [alter] privilege on [$namespace1/$tableName]") + interceptContains[AccessControlException](doAs(someone, sql(rewriteDataFiles2)))( + s"does not have [alter] privilege on [$namespace1/$tableName]") + + /** + * Case 1: Number of input data files equals or greater than minimum expected. + * Two logical plans triggered + * when ( input-files(2) >= min-input-files(2) ): + * + * == Physical Plan 1 == + * Call (1) + * + * == Physical Plan 2 == + * AppendData (3) + * +- * ColumnarToRow (2) + * +- BatchScan local.iceberg_ns.call_command_table (1) + */ + doAs( + admin, { + val result1 = sql(rewriteDataFiles1).collect() + // rewritten results into 2 data files + assert(result1(0)(0) === initDataFilesCount) + }) + + /** + * Case 2: Number of input data files less than minimum expected. 
+ * Only one logical plan triggered + * when ( input-files(2) < min-input-files(3) ) + * + * == Physical Plan == + * Call (1) + */ + doAs( + admin, { + val result2 = sql(rewriteDataFiles2).collect() + assert(result2(0)(0) === 0) + }) + } + } } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerLocalClient.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerLocalClient.scala index d25ea716a95..d7473a58065 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerLocalClient.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerLocalClient.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.plugin.spark.authz.ranger import java.text.SimpleDateFormat +import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.json.JsonMapper import org.apache.ranger.admin.client.RangerAdminRESTClient import org.apache.ranger.plugin.util.ServicePolicies @@ -27,6 +28,7 @@ class RangerLocalClient extends RangerAdminRESTClient with RangerClientHelper { private val mapper = new JsonMapper() .setDateFormat(new SimpleDateFormat("yyyyMMdd-HH:mm:ss.SSS-Z")) + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) private val policies: ServicePolicies = { val loader = Thread.currentThread().getContextClassLoader diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala index 48f3742556e..0c307195cee 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.plugin.spark.authz.ranger import scala.util.Try import org.apache.hadoop.security.UserGroupInformation -import org.apache.spark.sql.{Row, SparkSessionExtensions} +import org.apache.spark.sql.SparkSessionExtensions import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.catalyst.catalog.HiveTableRelation import org.apache.spark.sql.catalyst.plans.logical.Statistics @@ -31,9 +31,11 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.funsuite.AnyFunSuite import org.apache.kyuubi.plugin.spark.authz.{AccessControlException, SparkSessionProvider} +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ import org.apache.kyuubi.plugin.spark.authz.ranger.RuleAuthorization.KYUUBI_AUTHZ_TAG -import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils.getFieldVal - +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ abstract class RangerSparkExtensionSuite extends AnyFunSuite with SparkSessionProvider with BeforeAndAfterAll { // scalastyle:on @@ -87,8 +89,23 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite } } + /** + * Enables authorizing in single call mode, + * and disables authorizing in single call mode after calling `f` + */ + protected def withSingleCallEnabled(f: => Unit): Unit = { + val singleCallConfig = + 
s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call" + try { + SparkRangerAdminPlugin.getRangerConf.setBoolean(singleCallConfig, true) + f + } finally { + SparkRangerAdminPlugin.getRangerConf.setBoolean(singleCallConfig, false) + } + } + test("[KYUUBI #3226] RuleAuthorization: Should check privileges once only.") { - val logicalPlan = doAs("admin", sql("SHOW TABLES").queryExecution.logical) + val logicalPlan = doAs(admin, sql("SHOW TABLES").queryExecution.logical) val rule = new RuleAuthorization(spark) (1 until 10).foreach { i => @@ -116,7 +133,7 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite withCleanTmpResources(Seq((testTable, "table"))) { // create tmp table doAs( - "admin", { + admin, { sql(create) // session1: first query, should auth once.[LogicalRelation] @@ -155,18 +172,18 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite val e = intercept[AccessControlException](sql(create)) assert(e.getMessage === errorMessage("create", "mydb")) withCleanTmpResources(Seq((testDb, "database"))) { - doAs("admin", assert(Try { sql(create) }.isSuccess)) - doAs("admin", assert(Try { sql(alter) }.isSuccess)) + doAs(admin, assert(Try { sql(create) }.isSuccess)) + doAs(admin, assert(Try { sql(alter) }.isSuccess)) val e1 = intercept[AccessControlException](sql(alter)) assert(e1.getMessage === errorMessage("alter", "mydb")) val e2 = intercept[AccessControlException](sql(drop)) assert(e2.getMessage === errorMessage("drop", "mydb")) - doAs("kent", Try(sql("SHOW DATABASES")).isSuccess) + doAs(kent, Try(sql("SHOW DATABASES")).isSuccess) } } test("auth: tables") { - val db = "default" + val db = defaultDb val table = "src" val col = "key" @@ -178,14 +195,14 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite assert(e.getMessage === errorMessage("create")) withCleanTmpResources(Seq((s"$db.$table", "table"))) { - doAs("bob", assert(Try { sql(create0) }.isSuccess)) - doAs("bob", assert(Try { sql(alter0) }.isSuccess)) + doAs(bob, assert(Try { sql(create0) }.isSuccess)) + doAs(bob, assert(Try { sql(alter0) }.isSuccess)) val e1 = intercept[AccessControlException](sql(drop0)) assert(e1.getMessage === errorMessage("drop")) - doAs("bob", assert(Try { sql(alter0) }.isSuccess)) - doAs("bob", assert(Try { sql(select).collect() }.isSuccess)) - doAs("kent", assert(Try { sql(s"SELECT key FROM $db.$table").collect() }.isSuccess)) + doAs(bob, assert(Try { sql(alter0) }.isSuccess)) + doAs(bob, assert(Try { sql(select).collect() }.isSuccess)) + doAs(kent, assert(Try { sql(s"SELECT key FROM $db.$table").collect() }.isSuccess)) Seq( select, @@ -196,10 +213,10 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite s"SELECT key FROM $db.$table WHERE value in (SELECT value as key FROM $db.$table)") .foreach { q => doAs( - "kent", { + kent, { withClue(q) { val e = intercept[AccessControlException](sql(q).collect()) - assert(e.getMessage === errorMessage("select", "default/src/value", "kent")) + assert(e.getMessage === errorMessage("select", "default/src/value", kent)) } }) } @@ -207,92 +224,15 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite } test("auth: functions") { - val db = "default" + val db = defaultDb val func = "func" val create0 = s"CREATE FUNCTION IF NOT EXISTS $db.$func AS 'abc.mnl.xyz'" doAs( - "kent", { + kent, { val e = intercept[AccessControlException](sql(create0)) assert(e.getMessage === errorMessage("create", "default/func")) }) - doAs("admin", assert(Try(sql(create0)).isSuccess)) - } - - test("row level filter") { - 
val db = "default" - val table = "src" - val col = "key" - val create = s"CREATE TABLE IF NOT EXISTS $db.$table ($col int, value int) USING $format" - - withCleanTmpResources(Seq((s"$db.${table}2", "table"), (s"$db.$table", "table"))) { - doAs("admin", assert(Try { sql(create) }.isSuccess)) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 1, 1")) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 20, 2")) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 30, 3")) - - doAs( - "kent", - assert(sql(s"SELECT key FROM $db.$table order by key").collect() === - Seq(Row(1), Row(20), Row(30)))) - - Seq( - s"SELECT value FROM $db.$table", - s"SELECT value as key FROM $db.$table", - s"SELECT max(value) FROM $db.$table", - s"SELECT coalesce(max(value), 1) FROM $db.$table", - s"SELECT value FROM $db.$table WHERE value in (SELECT value as key FROM $db.$table)") - .foreach { q => - doAs( - "bob", { - withClue(q) { - assert(sql(q).collect() === Seq(Row(1))) - } - }) - } - doAs( - "bob", { - sql(s"CREATE TABLE $db.src2 using $format AS SELECT value FROM $db.$table") - assert(sql(s"SELECT value FROM $db.${table}2").collect() === Seq(Row(1))) - }) - } - } - - test("[KYUUBI #3581]: row level filter on permanent view") { - assume(isSparkV31OrGreater) - - val db = "default" - val table = "src" - val permView = "perm_view" - val col = "key" - val create = s"CREATE TABLE IF NOT EXISTS $db.$table ($col int, value int) USING $format" - val createView = - s"CREATE OR REPLACE VIEW $db.$permView" + - s" AS SELECT * FROM $db.$table" - - withCleanTmpResources(Seq( - (s"$db.$table", "table"), - (s"$db.$permView", "view"))) { - doAs("admin", assert(Try { sql(create) }.isSuccess)) - doAs("admin", assert(Try { sql(createView) }.isSuccess)) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 1, 1")) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 20, 2")) - doAs("admin", sql(s"INSERT INTO $db.$table SELECT 30, 3")) - - Seq( - s"SELECT value FROM $db.$permView", - s"SELECT value as key FROM $db.$permView", - s"SELECT max(value) FROM $db.$permView", - s"SELECT coalesce(max(value), 1) FROM $db.$permView", - s"SELECT value FROM $db.$permView WHERE value in (SELECT value as key FROM $db.$permView)") - .foreach { q => - doAs( - "perm_view_user", { - withClue(q) { - assert(sql(q).collect() === Seq(Row(1))) - } - }) - } - } + doAs(admin, assert(Try(sql(create0)).isSuccess)) } test("show tables") { @@ -303,13 +243,14 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite (s"$db.$table", "table"), (s"$db.${table}for_show", "table"), (s"$db", "database"))) { - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}for_show (key int) USING $format")) - - doAs("admin", assert(sql(s"show tables from $db").collect().length === 2)) - doAs("bob", assert(sql(s"show tables from $db").collect().length === 0)) - doAs("i_am_invisible", assert(sql(s"show tables from $db").collect().length === 0)) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}for_show (key int) USING $format")) + + doAs(admin, assert(sql(s"show tables from $db").collect().length === 2)) + doAs(bob, assert(sql(s"show tables from $db").collect().length === 0)) + doAs(invisibleUser, assert(sql(s"show tables from $db").collect().length === 0)) + doAs(invisibleUser, 
assert(sql(s"show tables from $db").limit(1).isEmpty)) } } @@ -317,18 +258,19 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite val db = "default2" withCleanTmpResources(Seq((db, "database"))) { - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db")) - doAs("admin", assert(sql(s"SHOW DATABASES").collect().length == 2)) - doAs("admin", assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == "default")) - doAs("admin", assert(sql(s"SHOW DATABASES").collectAsList().get(1).getString(0) == s"$db")) - - doAs("bob", assert(sql(s"SHOW DATABASES").collect().length == 1)) - doAs("bob", assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == "default")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db")) + doAs(admin, assert(sql(s"SHOW DATABASES").collect().length == 2)) + doAs(admin, assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == defaultDb)) + doAs(admin, assert(sql(s"SHOW DATABASES").collectAsList().get(1).getString(0) == s"$db")) + + doAs(bob, assert(sql(s"SHOW DATABASES").collect().length == 1)) + doAs(bob, assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == defaultDb)) + doAs(invisibleUser, assert(sql(s"SHOW DATABASES").limit(1).isEmpty)) } } test("show functions") { - val default = "default" + val default = defaultDb val db3 = "default3" val function1 = "function1" @@ -336,41 +278,41 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite (s"$default.$function1", "function"), (s"$db3.$function1", "function"), (db3, "database"))) { - doAs("admin", sql(s"CREATE FUNCTION $function1 AS 'Function1'")) - doAs("admin", assert(sql(s"show user functions $default.$function1").collect().length == 1)) - doAs("bob", assert(sql(s"show user functions $default.$function1").collect().length == 0)) + doAs(admin, sql(s"CREATE FUNCTION $function1 AS 'Function1'")) + doAs(admin, assert(sql(s"show user functions $default.$function1").collect().length == 1)) + doAs(bob, assert(sql(s"show user functions $default.$function1").collect().length == 0)) - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db3")) - doAs("admin", sql(s"CREATE FUNCTION $db3.$function1 AS 'Function1'")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db3")) + doAs(admin, sql(s"CREATE FUNCTION $db3.$function1 AS 'Function1'")) - doAs("admin", assert(sql(s"show user functions $db3.$function1").collect().length == 1)) - doAs("bob", assert(sql(s"show user functions $db3.$function1").collect().length == 0)) + doAs(admin, assert(sql(s"show user functions $db3.$function1").collect().length == 1)) + doAs(bob, assert(sql(s"show user functions $db3.$function1").collect().length == 0)) - doAs("admin", assert(sql(s"show system functions").collect().length > 0)) - doAs("bob", assert(sql(s"show system functions").collect().length > 0)) + doAs(admin, assert(sql(s"show system functions").collect().length > 0)) + doAs(bob, assert(sql(s"show system functions").collect().length > 0)) - val adminSystemFunctionCount = doAs("admin", sql(s"show system functions").collect().length) - val bobSystemFunctionCount = doAs("bob", sql(s"show system functions").collect().length) + val adminSystemFunctionCount = doAs(admin, sql(s"show system functions").collect().length) + val bobSystemFunctionCount = doAs(bob, sql(s"show system functions").collect().length) assert(adminSystemFunctionCount == bobSystemFunctionCount) } } test("show columns") { - val db = "default" + val db = defaultDb val table = "src" val col = "key" val create = s"CREATE TABLE IF NOT EXISTS $db.$table ($col int, 
value int) USING $format" withCleanTmpResources(Seq((s"$db.$table", "table"))) { - doAs("admin", sql(create)) + doAs(admin, sql(create)) - doAs("admin", assert(sql(s"SHOW COLUMNS IN $table").count() == 2)) - doAs("admin", assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 2)) - doAs("admin", assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 2)) + doAs(admin, assert(sql(s"SHOW COLUMNS IN $table").count() == 2)) + doAs(admin, assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 2)) + doAs(admin, assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 2)) - doAs("kent", assert(sql(s"SHOW COLUMNS IN $table").count() == 1)) - doAs("kent", assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 1)) - doAs("kent", assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 1)) + doAs(kent, assert(sql(s"SHOW COLUMNS IN $table").count() == 1)) + doAs(kent, assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 1)) + doAs(kent, assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 1)) } } @@ -385,24 +327,24 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite (s"$db.${table}_select2", "table"), (s"$db.${table}_select3", "table"), (s"$db", "database"))) { - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use1 (key int) USING $format")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use2 (key int) USING $format")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select1 (key int) USING $format")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select2 (key int) USING $format")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select3 (key int) USING $format")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use1 (key int) USING $format")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use2 (key int) USING $format")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select1 (key int) USING $format")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select2 (key int) USING $format")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select3 (key int) USING $format")) doAs( - "admin", + admin, assert(sql(s"show table extended from $db like '$table*'").collect().length === 5)) doAs( - "bob", + bob, assert(sql(s"show tables from $db").collect().length === 5)) doAs( - "bob", + bob, assert(sql(s"show table extended from $db like '$table*'").collect().length === 3)) doAs( - "i_am_invisible", + invisibleUser, assert(sql(s"show table extended from $db like '$table*'").collect().length === 0)) } } @@ -414,48 +356,48 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite val globalTempView2 = "global_temp_view2" // create or replace view - doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) + doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)")) // rename view - doAs("denyuser2", sql(s"ALTER VIEW $tempView RENAME TO $tempView2")) + doAs(denyUser2, sql(s"ALTER VIEW $tempView RENAME TO $tempView2")) doAs( - "denyuser2", + denyUser2, sql(s"ALTER VIEW global_temp.$globalTempView RENAME TO global_temp.$globalTempView2")) - doAs("admin", sql(s"DROP VIEW IF EXISTS $tempView2")) - doAs("admin", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView2")) - doAs("admin", assert(sql("show tables 
from global_temp").collect().length == 0)) + doAs(admin, sql(s"DROP VIEW IF EXISTS $tempView2")) + doAs(admin, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView2")) + doAs(admin, assert(sql("show tables from global_temp").collect().length == 0)) } test("[KYUUBI #3426] Drop temp view should be skipped permission check") { val tempView = "temp_view" val globalTempView = "global_temp_view" - doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) + doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" + s" AS select * from values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" + s" AS select * from values(1)")) // global_temp will contain the temporary view, even if it is not global - doAs("admin", assert(sql("show tables from global_temp").collect().length == 2)) + doAs(admin, assert(sql("show tables from global_temp").collect().length == 2)) - doAs("denyuser2", sql(s"DROP VIEW IF EXISTS $tempView")) - doAs("denyuser2", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView")) + doAs(denyUser2, sql(s"DROP VIEW IF EXISTS $tempView")) + doAs(denyUser2, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView")) - doAs("admin", assert(sql("show tables from global_temp").collect().length == 0)) + doAs(admin, assert(sql("show tables from global_temp").collect().length == 0)) } test("[KYUUBI #3428] AlterViewAsCommand should be skipped permission check") { @@ -463,26 +405,26 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite val globalTempView = "global_temp_view" // create or replace view - doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) + doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" + s" AS select * from values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)")) doAs( - "denyuser", + denyUser, sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" + s" AS select * from values(1)")) // rename view - doAs("denyuser2", sql(s"ALTER VIEW $tempView AS SELECT * FROM values(1)")) - doAs("denyuser2", sql(s"ALTER VIEW global_temp.$globalTempView AS SELECT * FROM values(1)")) + doAs(denyUser2, sql(s"ALTER VIEW $tempView AS SELECT * FROM values(1)")) + doAs(denyUser2, sql(s"ALTER VIEW global_temp.$globalTempView AS SELECT * FROM values(1)")) - doAs("admin", sql(s"DROP VIEW IF EXISTS $tempView")) - doAs("admin", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView")) - doAs("admin", assert(sql("show tables from global_temp").collect().length == 0)) + doAs(admin, sql(s"DROP VIEW IF EXISTS $tempView")) + doAs(admin, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView")) + doAs(admin, assert(sql("show tables from global_temp").collect().length == 0)) } test("[KYUUBI #3343] pass temporary view creation") { @@ -491,28 +433,39 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite withTempView(tempView) { doAs( - "denyuser", + denyUser, assert(Try(sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")).isSuccess)) doAs( - "denyuser", + denyUser, Try(sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" + s" AS select * from values(1)")).isSuccess) } 
withGlobalTempView(globalTempView) { doAs( - "denyuser", + denyUser, Try( sql( s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)")).isSuccess) doAs( - "denyuser", + denyUser, Try(sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" + s" AS select * from values(1)")).isSuccess) } - doAs("admin", assert(sql("show tables from global_temp").collect().length == 0)) + doAs(admin, assert(sql("show tables from global_temp").collect().length == 0)) + } + + test("[KYUUBI #5172] Check USE permissions for DESCRIBE FUNCTION") { + val fun = s"$defaultDb.function1" + + withCleanTmpResources(Seq((s"$fun", "function"))) { + doAs(admin, sql(s"CREATE FUNCTION $fun AS 'Function1'")) + doAs(admin, sql(s"DESC FUNCTION $fun").collect().length == 1) + val e = intercept[AccessControlException](doAs(denyUser, sql(s"DESC FUNCTION $fun"))) + assert(e.getMessage === errorMessage("_any", "default/function1", denyUser)) + } } } @@ -525,12 +478,12 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { test("table stats must be specified") { val table = "hive_src" withCleanTmpResources(Seq((table, "table"))) { - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int)")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int)")) doAs( - "admin", { + admin, { val hiveTableRelation = sql(s"SELECT * FROM $table") .queryExecution.optimizedPlan.collectLeaves().head.asInstanceOf[HiveTableRelation] - assert(getFieldVal[Option[Statistics]](hiveTableRelation, "tableStats").nonEmpty) + assert(getField[Option[Statistics]](hiveTableRelation, "tableStats").nonEmpty) }) } } @@ -538,9 +491,9 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { test("HiveTableRelation should be able to be converted to LogicalRelation") { val table = "hive_src" withCleanTmpResources(Seq((table, "table"))) { - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int) STORED AS PARQUET")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int) STORED AS PARQUET")) doAs( - "admin", { + admin, { val relation = sql(s"SELECT * FROM $table") .queryExecution.optimizedPlan.collectLeaves().head assert(relation.isInstanceOf[LogicalRelation]) @@ -558,7 +511,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { (s"$db.$table1", "table"), (s"$db", "database"))) { doAs( - "admin", { + admin, { sql(s"CREATE DATABASE IF NOT EXISTS $db") sql(s"CREATE TABLE IF NOT EXISTS $db.$table1(id int) STORED AS PARQUET") sql(s"INSERT INTO $db.$table1 SELECT 1") @@ -579,16 +532,16 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { (adminPermView, "view"), (permView, "view"), (table, "table"))) { - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int)")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int)")) - doAs("admin", sql(s"CREATE VIEW ${adminPermView} AS SELECT * FROM $table")) + doAs(admin, sql(s"CREATE VIEW ${adminPermView} AS SELECT * FROM $table")) val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"CREATE VIEW $permView AS SELECT 1 as a"))) + doAs(someone, sql(s"CREATE VIEW $permView AS SELECT 1 as a"))) assert(e1.getMessage.contains(s"does not have [create] privilege on [default/$permView]")) val e2 = intercept[AccessControlException]( - doAs("someone", sql(s"CREATE VIEW $permView AS SELECT * FROM $table"))) + doAs(someone, sql(s"CREATE VIEW $permView AS SELECT * FROM $table"))) if (isSparkV32OrGreater) { assert(e2.getMessage.contains(s"does not have [select] 
privilege on [default/$table/id]")) } else { @@ -598,32 +551,66 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { } test("[KYUUBI #3326] check persisted view and skip shadowed table") { + val db1 = defaultDb val table = "hive_src" val permView = "perm_view" - val db1 = "default" - val db2 = "db2" withCleanTmpResources(Seq( (s"$db1.$table", "table"), - (s"$db2.$permView", "view"), - (db2, "database"))) { - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int)")) - - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db2")) - doAs("admin", sql(s"CREATE VIEW $db2.$permView AS SELECT * FROM $table")) + (s"$db1.$permView", "view"))) { + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)")) + doAs(admin, sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table")) + // KYUUBI #3326: with no privileges to the permanent view or the source table val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"select * from $db2.$permView")).show(0)) + doAs( + someone, { + sql(s"select * from $db1.$permView").collect() + })) + if (isSparkV31OrGreater) { + assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$permView/id]")) + } else { + assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]")) + } + } + } + + test("KYUUBI #4504: query permanent view with privilege to permanent view only") { + val db1 = defaultDb + val table = "hive_src" + val permView = "perm_view" + val userPermViewOnly = permViewOnlyUser + + withCleanTmpResources(Seq( + (s"$db1.$table", "table"), + (s"$db1.$permView", "view"))) { + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)")) + doAs(admin, sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table")) + + // query all columns of the permanent view + // with access privileges to the permanent view but no privilege to the source table + val sql1 = s"SELECT * FROM $db1.$permView" if (isSparkV31OrGreater) { - assert(e1.getMessage.contains(s"does not have [select] privilege on [$db2/$permView/id]")) + doAs(userPermViewOnly, { sql(sql1).collect() }) } else { + val e1 = intercept[AccessControlException](doAs(userPermViewOnly, { sql(sql1).collect() })) assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]")) } + + // query the second column of permanent view with multiple columns + // with access privileges to the permanent view but no privilege to the source table + val sql2 = s"SELECT name FROM $db1.$permView" + if (isSparkV31OrGreater) { + doAs(userPermViewOnly, { sql(sql2).collect() }) + } else { + val e2 = intercept[AccessControlException](doAs(userPermViewOnly, { sql(sql2).collect() })) + assert(e2.getMessage.contains(s"does not have [select] privilege on [$db1/$table/name]")) + } } } test("[KYUUBI #3371] support throws all disallowed privileges in exception") { - val db1 = "default" + val db1 = defaultDb val srcTable1 = "hive_src1" val srcTable2 = "hive_src2" val sinkTable1 = "hive_sink1" @@ -633,17 +620,17 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { (s"$db1.$srcTable2", "table"), (s"$db1.$sinkTable1", "table"))) { doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable1" + s" (id int, name string, city string)")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable2" + s" (id int, age int)")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$sinkTable1" + s" (id int, age int, name string, 
city string)")) @@ -652,25 +639,17 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { s" FROM $db1.$srcTable1 as tb1" + s" JOIN $db1.$srcTable2 as tb2" + s" on tb1.id = tb2.id" - val e1 = intercept[AccessControlException](doAs("someone", sql(insertSql1))) + val e1 = intercept[AccessControlException](doAs(someone, sql(insertSql1))) assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$srcTable1/id]")) - try { - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - true) - val e2 = intercept[AccessControlException](doAs("someone", sql(insertSql1))) + withSingleCallEnabled { + val e2 = intercept[AccessControlException](doAs(someone, sql(insertSql1))) assert(e2.getMessage.contains(s"does not have" + s" [select] privilege on" + s" [$db1/$srcTable1/id,$db1/$srcTable1/name,$db1/$srcTable1/city," + s"$db1/$srcTable2/age,$db1/$srcTable2/id]," + s" [update] privilege on [$db1/$sinkTable1/id,$db1/$sinkTable1/age," + s"$db1/$sinkTable1/name,$db1/$sinkTable1/city]")) - } finally { - // revert to default value - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - false) } } } @@ -678,7 +657,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { test("[KYUUBI #3411] skip checking cache table") { if (isSparkV32OrGreater) { // cache table sql supported since 3.2.0 - val db1 = "default" + val db1 = defaultDb val srcTable1 = "hive_src1" val cacheTable1 = "cacheTable1" val cacheTable2 = "cacheTable2" @@ -693,23 +672,23 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { (s"$db1.$cacheTable4", "cache"))) { doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable1" + s" (id int, name string, city string)")) val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"CACHE TABLE $cacheTable2 select * from $db1.$srcTable1"))) + doAs(someone, sql(s"CACHE TABLE $cacheTable2 select * from $db1.$srcTable1"))) assert( e1.getMessage.contains(s"does not have [select] privilege on [$db1/$srcTable1/id]")) - doAs("admin", sql(s"CACHE TABLE $cacheTable3 SELECT 1 AS a, 2 AS b ")) - doAs("someone", sql(s"CACHE TABLE $cacheTable4 select 1 as a, 2 as b ")) + doAs(admin, sql(s"CACHE TABLE $cacheTable3 SELECT 1 AS a, 2 AS b ")) + doAs(someone, sql(s"CACHE TABLE $cacheTable4 select 1 as a, 2 as b ")) } } } test("[KYUUBI #3608] Support {OWNER} variable for queries") { - val db = "default" + val db = defaultDb val table = "owner_variable" val select = s"SELECT key FROM $db.$table" @@ -728,7 +707,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { }.isSuccess)) doAs( - "create_only_user", { + createOnlyUser, { val e = intercept[AccessControlException](sql(select).collect()) assert(e.getMessage === errorMessage("select", s"$db/$table/key")) }) @@ -742,10 +721,44 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite { Seq( (s"$db.$table", "table"), (s"$db", "database"))) { - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db")) - doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db")) + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format")) sql("SHOW DATABASES").queryExecution.optimizedPlan.stats sql(s"SHOW TABLES IN $db").queryExecution.optimizedPlan.stats } } + + 
test("[KYUUBI #4658] insert overwrite hive directory") { + val db1 = defaultDb + val table = "src" + + withCleanTmpResources(Seq((s"$db1.$table", "table"))) { + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)")) + val e = intercept[AccessControlException]( + doAs( + someone, + sql( + s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir' ROW FORMAT DELIMITED FIELDS + | TERMINATED BY ',' + | SELECT * FROM $db1.$table;""".stripMargin))) + assert(e.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]")) + } + } + + test("[KYUUBI #4658] insert overwrite datasource directory") { + val db1 = defaultDb + val table = "src" + + withCleanTmpResources(Seq((s"$db1.$table", "table"))) { + doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)")) + val e = intercept[AccessControlException]( + doAs( + someone, + sql( + s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir' + | USING parquet + | SELECT * FROM $db1.$table;""".stripMargin))) + assert(e.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]")) + } + } } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala index 8711a728726..301ae87c553 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala @@ -22,6 +22,8 @@ import org.apache.hadoop.security.UserGroupInformation import org.scalatest.funsuite.AnyFunSuite import org.apache.kyuubi.plugin.spark.authz.{ObjectType, OperationType} +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ import org.apache.kyuubi.plugin.spark.authz.ranger.SparkRangerAdminPlugin._ class SparkRangerAdminPluginSuite extends AnyFunSuite { @@ -29,13 +31,13 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite { test("get filter expression") { val bob = UserGroupInformation.createRemoteUser("bob") - val are = AccessResource(ObjectType.TABLE, "default", "src", null) + val are = AccessResource(ObjectType.TABLE, defaultDb, "src", null) def buildAccessRequest(ugi: UserGroupInformation): AccessRequest = { AccessRequest(are, ugi, OperationType.QUERY, AccessType.SELECT) } val maybeString = getFilterExpr(buildAccessRequest(bob)) assert(maybeString.get === "key<20") - Seq("admin", "alice").foreach { user => + Seq(admin, alice).foreach { user => val ugi = UserGroupInformation.createRemoteUser(user) val maybeString = getFilterExpr(buildAccessRequest(ugi)) assert(maybeString.isEmpty) @@ -45,18 +47,21 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite { test("get data masker") { val bob = UserGroupInformation.createRemoteUser("bob") def buildAccessRequest(ugi: UserGroupInformation, column: String): AccessRequest = { - val are = AccessResource(ObjectType.COLUMN, "default", "src", column) + val are = AccessResource(ObjectType.COLUMN, defaultDb, "src", column) AccessRequest(are, ugi, OperationType.QUERY, AccessType.SELECT) } assert(getMaskingExpr(buildAccessRequest(bob, "value1")).get === "md5(cast(value1 as string))") assert(getMaskingExpr(buildAccessRequest(bob, "value2")).get === - "regexp_replace(regexp_replace(regexp_replace(value2, '[A-Z]', 
'X'), '[a-z]', 'x')," + - " '[0-9]', 'n')") + "regexp_replace(regexp_replace(regexp_replace(regexp_replace(value2, '[A-Z]', 'X')," + + " '[a-z]', 'x'), '[0-9]', 'n'), '[^A-Za-z0-9]', 'U')") assert(getMaskingExpr(buildAccessRequest(bob, "value3")).get contains "regexp_replace") assert(getMaskingExpr(buildAccessRequest(bob, "value4")).get === "date_trunc('YEAR', value4)") - assert(getMaskingExpr(buildAccessRequest(bob, "value5")).get contains "regexp_replace") + assert(getMaskingExpr(buildAccessRequest(bob, "value5")).get === + "concat(regexp_replace(regexp_replace(regexp_replace(regexp_replace(" + + "left(value5, length(value5) - 4), '[A-Z]', 'X'), '[a-z]', 'x')," + + " '[0-9]', 'n'), '[^A-Za-z0-9]', 'U'), right(value5, 4))") - Seq("admin", "alice").foreach { user => + Seq(admin, alice).foreach { user => val ugi = UserGroupInformation.createRemoteUser(user) val maybeString = getMaskingExpr(buildAccessRequest(ugi, "value1")) assert(maybeString.isEmpty) diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala index 73a13bc1c3c..5c27a470f74 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala @@ -22,6 +22,9 @@ import scala.util.Try // scalastyle:off import org.apache.kyuubi.plugin.spark.authz.AccessControlException +import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._ +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ /** * Tests for RangerSparkExtensionSuite @@ -32,8 +35,6 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu val catalogV2 = "testcat" val jdbcCatalogV2 = "jdbc2" - val namespace1 = "ns1" - val namespace2 = "ns2" val table1 = "table1" val table2 = "table2" val outputTable1 = "outputTable1" @@ -54,13 +55,13 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu super.beforeAll() - doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1")) + doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table1" + " (id int, name string, city string)")) doAs( - "admin", + admin, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable1" + " (id int, name string, city string)")) } @@ -82,7 +83,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // create database val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace2").explain())) + doAs(someone, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace2").explain())) assert(e1.getMessage.contains(s"does not have [create] privilege" + s" on [$namespace2]")) } @@ -92,7 +93,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // create database val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"DROP DATABASE IF EXISTS $catalogV2.$namespace2").explain())) + doAs(someone, sql(s"DROP DATABASE IF EXISTS 
$catalogV2.$namespace2").explain())) assert(e1.getMessage.contains(s"does not have [drop] privilege" + s" on [$namespace2]")) } @@ -102,7 +103,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // select val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"select city, id from $catalogV2.$namespace1.$table1").explain())) + doAs(someone, sql(s"select city, id from $catalogV2.$namespace1.$table1").explain())) assert(e1.getMessage.contains(s"does not have [select] privilege" + s" on [$namespace1/$table1/city]")) } @@ -110,7 +111,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu test("[KYUUBI #4255] DESCRIBE TABLE") { assume(isSparkV31OrGreater) val e1 = intercept[AccessControlException]( - doAs("someone", sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain())) + doAs(someone, sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain())) assert(e1.getMessage.contains(s"does not have [select] privilege" + s" on [$namespace1/$table1]")) } @@ -120,14 +121,14 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // CreateTable val e2 = intercept[AccessControlException]( - doAs("someone", sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2"))) + doAs(someone, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2"))) assert(e2.getMessage.contains(s"does not have [create] privilege" + s" on [$namespace1/$table2]")) // CreateTableAsSelect val e21 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2" + s" AS select * from $catalogV2.$namespace1.$table1"))) assert(e21.getMessage.contains(s"does not have [select] privilege" + @@ -139,7 +140,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // DropTable val e3 = intercept[AccessControlException]( - doAs("someone", sql(s"DROP TABLE $catalogV2.$namespace1.$table1"))) + doAs(someone, sql(s"DROP TABLE $catalogV2.$namespace1.$table1"))) assert(e3.getMessage.contains(s"does not have [drop] privilege" + s" on [$namespace1/$table1]")) } @@ -150,7 +151,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // AppendData: Insert Using a VALUES Clause val e4 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" + s" VALUES (1, 'bowenliang123', 'Guangzhou')"))) assert(e4.getMessage.contains(s"does not have [update] privilege" + @@ -159,7 +160,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // AppendData: Insert Using a TABLE Statement val e42 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" + s" TABLE $catalogV2.$namespace1.$table1"))) assert(e42.getMessage.contains(s"does not have [select] privilege" + @@ -168,7 +169,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // AppendData: Insert Using a SELECT Statement val e43 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" + s" SELECT * from $catalogV2.$namespace1.$table1"))) assert(e43.getMessage.contains(s"does not have [select] privilege" + @@ -177,7 +178,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // OverwriteByExpression: Insert Overwrite val e44 = 
intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"INSERT OVERWRITE $catalogV2.$namespace1.$outputTable1 (id, name, city)" + s" VALUES (1, 'bowenliang123', 'Guangzhou')"))) assert(e44.getMessage.contains(s"does not have [update] privilege" + @@ -199,27 +200,20 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // MergeIntoTable: Using a MERGE INTO Statement val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(mergeIntoSql))) assert(e1.getMessage.contains(s"does not have [select] privilege" + s" on [$namespace1/$table1/id]")) - try { - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - true) + withSingleCallEnabled { val e2 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(mergeIntoSql))) assert(e2.getMessage.contains(s"does not have" + s" [select] privilege" + s" on [$namespace1/$table1/id,$namespace1/table1/name,$namespace1/$table1/city]," + s" [update] privilege on [$namespace1/$outputTable1]")) - } finally { - SparkRangerAdminPlugin.getRangerConf.setBoolean( - s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call", - false) } } @@ -229,7 +223,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // UpdateTable val e5 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Hangzhou' " + " WHERE id=1"))) assert(e5.getMessage.contains(s"does not have [update] privilege" + @@ -241,7 +235,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // DeleteFromTable val e6 = intercept[AccessControlException]( - doAs("someone", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=1"))) + doAs(someone, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=1"))) assert(e6.getMessage.contains(s"does not have [update] privilege" + s" on [$namespace1/$table1]")) } @@ -252,7 +246,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // CacheTable val e7 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"CACHE TABLE $cacheTable1" + s" AS select * from $catalogV2.$namespace1.$table1"))) if (isSparkV32OrGreater) { @@ -269,7 +263,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"TRUNCATE TABLE $catalogV2.$namespace1.$table1"))) assert(e1.getMessage.contains(s"does not have [update] privilege" + s" on [$namespace1/$table1]")) @@ -280,7 +274,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"MSCK REPAIR TABLE $catalogV2.$namespace1.$table1"))) assert(e1.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1/$table1]")) @@ -292,7 +286,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // AddColumns val e61 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 ADD COLUMNS (age int) ").explain())) assert(e61.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1/$table1]")) @@ -300,7 +294,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // DropColumns val e62 = intercept[AccessControlException]( doAs( - "someone", + 
someone, sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 DROP COLUMNS city ").explain())) assert(e62.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1/$table1]")) @@ -308,7 +302,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // RenameColumn val e63 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 RENAME COLUMN city TO city2 ").explain())) assert(e63.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1/$table1]")) @@ -316,7 +310,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // AlterColumn val e64 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 " + s"ALTER COLUMN city COMMENT 'city' "))) assert(e64.getMessage.contains(s"does not have [alter] privilege" + @@ -329,7 +323,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // CommentOnNamespace val e1 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"COMMENT ON DATABASE $catalogV2.$namespace1 IS 'xYz' ").explain())) assert(e1.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1]")) @@ -337,7 +331,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // CommentOnNamespace val e2 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"COMMENT ON NAMESPACE $catalogV2.$namespace1 IS 'xYz' ").explain())) assert(e2.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1]")) @@ -345,7 +339,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu // CommentOnTable val e3 = intercept[AccessControlException]( doAs( - "someone", + someone, sql(s"COMMENT ON TABLE $catalogV2.$namespace1.$table1 IS 'xYz' ").explain())) assert(e3.getMessage.contains(s"does not have [alter] privilege" + s" on [$namespace1/$table1]")) diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForIcebergSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForIcebergSuite.scala index 99b7eb97300..905cd428cab 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForIcebergSuite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForIcebergSuite.scala @@ -21,6 +21,7 @@ import org.apache.spark.SparkConf import org.scalatest.Outcome import org.apache.kyuubi.Utils +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ class DataMaskingForIcebergSuite extends DataMaskingTestBase { override protected val extraSparkConf: SparkConf = { diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForJDBCV2Suite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForJDBCV2Suite.scala index 894daeaf711..f74092d0b45 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForJDBCV2Suite.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingForJDBCV2Suite.scala @@ 
-23,6 +23,8 @@ import scala.util.Try import org.apache.spark.SparkConf import org.scalatest.Outcome +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ + class DataMaskingForJDBCV2Suite extends DataMaskingTestBase { override protected val extraSparkConf: SparkConf = { val conf = new SparkConf() diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala index c13362617df..af87a39a0af 100644 --- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala @@ -17,18 +17,20 @@ package org.apache.kyuubi.plugin.spark.authz.ranger.datamasking -// scalastyle:off import java.sql.Timestamp import scala.util.Try +// scalastyle:off import org.apache.commons.codec.digest.DigestUtils.md5Hex import org.apache.spark.sql.{Row, SparkSessionExtensions} -import org.scalatest.{Assertion, BeforeAndAfterAll} +import org.scalatest.BeforeAndAfterAll import org.scalatest.funsuite.AnyFunSuite +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ import org.apache.kyuubi.plugin.spark.authz.SparkSessionProvider import org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ /** * Base trait for data masking tests, derivative classes shall name themselves following: @@ -55,6 +57,17 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef "SELECT 20, 2, 'kyuubi', 'y', timestamp'2018-11-17 12:34:56', 'world'") sql("INSERT INTO default.src " + "SELECT 30, 3, 'spark', 'a', timestamp'2018-11-17 12:34:56', 'world'") + + // scalastyle:off + val value1 = "hello WORD 123 ~!@# AßþΔЙקم๗ቐあア叶葉엽" + val value2 = "AßþΔЙקم๗ቐあア叶葉엽 hello WORD 123 ~!@#" + // AßþΔЙקم๗ቐあア叶葉엽 reference https://zh.wikipedia.org/zh-cn/Unicode#XML.E5.92.8CUnicode + // scalastyle:on + sql(s"INSERT INTO default.src " + + s"SELECT 10, 4, '$value1', '$value1', timestamp'2018-11-17 12:34:56', '$value1'") + sql("INSERT INTO default.src " + + s"SELECT 11, 5, '$value2', '$value2', timestamp'2018-11-17 12:34:56', '$value2'") + sql(s"CREATE TABLE default.unmasked $format AS SELECT * FROM default.src") } @@ -64,45 +77,49 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef } override def beforeAll(): Unit = { - doAs("admin", setup()) + doAs(admin, setup()) super.beforeAll() } override def afterAll(): Unit = { - doAs("admin", cleanup()) + doAs(admin, cleanup()) spark.stop super.afterAll() } - protected def checkAnswer(user: String, query: String, result: Seq[Row]): Assertion = { - doAs(user, assert(sql(query).collect() === result)) - } - test("simple query with a user doesn't have mask rules") { - checkAnswer("kent", "SELECT key FROM default.src order by key", Seq(Row(1), Row(20), Row(30))) + checkAnswer( + kent, + "SELECT key FROM default.src order by key", + Seq(Row(1), Row(10), Row(11), Row(20), Row(30))) } test("simple query with a user has mask rules") { val result = Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld")) - checkAnswer("bob", "SELECT value1, value2, value3, value4, value5 FROM default.src", result) checkAnswer( - "bob", - "SELECT value1 as key, 
value2, value3, value4, value5 FROM default.src", + bob, + "SELECT value1, value2, value3, value4, value5 FROM default.src " + + "where key = 1", + result) + checkAnswer( + bob, + "SELECT value1 as key, value2, value3, value4, value5 FROM default.src where key = 1", result) } test("star") { val result = Seq(Row(1, md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld")) - checkAnswer("bob", "SELECT * FROM default.src", result) + checkAnswer(bob, "SELECT * FROM default.src where key = 1", result) } test("simple udf") { val result = Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld")) checkAnswer( - "bob", - "SELECT max(value1), max(value2), max(value3), max(value4), max(value5) FROM default.src", + bob, + "SELECT max(value1), max(value2), max(value3), max(value4), max(value5) FROM default.src" + + " where key = 1", result) } @@ -110,10 +127,10 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef val result = Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld")) checkAnswer( - "bob", + bob, "SELECT coalesce(max(value1), 1), coalesce(max(value2), 1), coalesce(max(value3), 1), " + "coalesce(max(value4), timestamp '2018-01-01 22:33:44'), coalesce(max(value5), 1) " + - "FROM default.src", + "FROM default.src where key = 1", result) } @@ -121,53 +138,68 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef val result = Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld")) checkAnswer( - "bob", + bob, "SELECT value1, value2, value3, value4, value5 FROM default.src WHERE value2 in " + - "(SELECT value2 as key FROM default.src)", + "(SELECT value2 as key FROM default.src where key = 1)", result) } test("create a unmasked table as select from a masked one") { withCleanTmpResources(Seq(("default.src2", "table"))) { - doAs("bob", sql(s"CREATE TABLE default.src2 $format AS SELECT value1 FROM default.src")) - checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")))) + doAs( + bob, + sql(s"CREATE TABLE default.src2 $format AS SELECT value1 FROM default.src " + + s"where key = 1")) + checkAnswer(bob, "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")))) } } test("insert into a unmasked table from a masked one") { withCleanTmpResources(Seq(("default.src2", "table"), ("default.src3", "table"))) { - doAs("bob", sql(s"CREATE TABLE default.src2 (value1 string) $format")) - doAs("bob", sql(s"INSERT INTO default.src2 SELECT value1 from default.src")) - doAs("bob", sql(s"INSERT INTO default.src2 SELECT value1 as v from default.src")) - checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")), Row(md5Hex("1")))) - doAs("bob", sql(s"CREATE TABLE default.src3 (k int, value string) $format")) - doAs("bob", sql(s"INSERT INTO default.src3 SELECT key, value1 from default.src")) - doAs("bob", sql(s"INSERT INTO default.src3 SELECT key, value1 as v from default.src")) - checkAnswer("bob", "SELECT value FROM default.src3", Seq(Row(md5Hex("1")), Row(md5Hex("1")))) + doAs(bob, sql(s"CREATE TABLE default.src2 (value1 string) $format")) + doAs( + bob, + sql(s"INSERT INTO default.src2 SELECT value1 from default.src " + + s"where key = 1")) + doAs( + bob, + sql(s"INSERT INTO default.src2 SELECT value1 as v from default.src " + + s"where key = 1")) + checkAnswer(bob, "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")), Row(md5Hex("1")))) + doAs(bob, sql(s"CREATE TABLE default.src3 (k int, 
value string) $format")) + doAs( + bob, + sql(s"INSERT INTO default.src3 SELECT key, value1 from default.src " + + s"where key = 1")) + doAs( + bob, + sql(s"INSERT INTO default.src3 SELECT key, value1 as v from default.src " + + s"where key = 1")) + checkAnswer(bob, "SELECT value FROM default.src3", Seq(Row(md5Hex("1")), Row(md5Hex("1")))) } } test("join on an unmasked table") { val s = "SELECT a.value1, b.value1 FROM default.src a" + " join default.unmasked b on a.value1=b.value1" - checkAnswer("bob", s, Nil) - checkAnswer("bob", s, Nil) // just for testing query multiple times, don't delete it + checkAnswer(bob, s, Nil) + checkAnswer(bob, s, Nil) // just for testing query multiple times, don't delete it } test("self join on a masked table") { val s = "SELECT a.value1, b.value1 FROM default.src a" + - " join default.src b on a.value1=b.value1" - checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1")))) + " join default.src b on a.value1=b.value1 where a.key = 1 and b.key = 1 " + checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1")))) // just for testing query multiple times, don't delete it - checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1")))) + checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1")))) } test("self join on a masked table and filter the masked column with original value") { val s = "SELECT a.value1, b.value1 FROM default.src a" + " join default.src b on a.value1=b.value1" + " where a.value1='1' and b.value1='1'" - checkAnswer("bob", s, Nil) - checkAnswer("bob", s, Nil) // just for testing query multiple times, don't delete it + checkAnswer(bob, s, Nil) + checkAnswer(bob, s, Nil) // just for testing query multiple times, don't delete it } test("self join on a masked table and filter the masked column with masked value") { @@ -215,7 +247,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef // +- DataMaskingStage0Marker Relation default.src[key#60,value1#61,value2#62,value3#63,value4#64,value5#65] parquet // +- Project [key#153, md5(cast(cast(value1#154 as string) as binary)) AS value1#148, regexp_replace(regexp_replace(regexp_replace(value2#155, [A-Z], X, 1), [a-z], x, 1), [0-9], n, 1) AS value2#149, regexp_replace(regexp_replace(regexp_replace(value3#156, [A-Z], X, 5), [a-z], x, 5), [0-9], n, 5) AS value3#150, date_trunc(YEAR, value4#157, Some(Asia/Shanghai)) AS value4#151, concat(regexp_replace(regexp_replace(regexp_replace(left(value5#158, (length(value5#158) - 4)), [A-Z], X, 1), [a-z], x, 1), [0-9], n, 1), right(value5#158, 4)) AS value5#152] // +- Relation default.src[key#153,value1#154,value2#155,value3#156,value4#157,value5#158] parquet - // checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1")))) + // checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1")))) // // // scalastyle:on @@ -224,44 +256,74 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef val s2 = "SELECT a.value1, b.value1 FROM default.src a" + " join default.src b on a.value1=b.value1" + s" where a.value2='xxxxx' and b.value2='xxxxx'" - checkAnswer("bob", s2, Seq(Row(md5Hex("1"), md5Hex("1")))) + checkAnswer(bob, s2, Seq(Row(md5Hex("1"), md5Hex("1")))) // just for testing query multiple times, don't delete it - checkAnswer("bob", s2, Seq(Row(md5Hex("1"), md5Hex("1")))) + checkAnswer(bob, s2, Seq(Row(md5Hex("1"), md5Hex("1")))) } test("union an unmasked table") { val s = """ SELECT value1 from ( - SELECT a.value1 FROM default.src a + SELECT a.value1 FROM default.src a where a.key = 1 union (SELECT b.value1 FROM default.unmasked b) ) c 
order by value1 """ - checkAnswer("bob", s, Seq(Row("1"), Row("2"), Row("3"), Row(md5Hex("1")))) + checkAnswer(bob, s, Seq(Row("1"), Row("2"), Row("3"), Row("4"), Row("5"), Row(md5Hex("1")))) } test("union a masked table") { - val s = "SELECT a.value1 FROM default.src a union" + - " (SELECT b.value1 FROM default.src b)" - checkAnswer("bob", s, Seq(Row(md5Hex("1")))) + val s = "SELECT a.value1 FROM default.src a where a.key = 1 union" + + " (SELECT b.value1 FROM default.src b where b.key = 1)" + checkAnswer(bob, s, Seq(Row(md5Hex("1")))) } - test("KYUUBI #3581: permanent view should lookup rule on itself not the ") { + test("KYUUBI #3581: permanent view should lookup rule on itself not the raw table") { assume(isSparkV31OrGreater) val supported = doAs( - "perm_view_user", + permViewUser, Try(sql("CREATE OR REPLACE VIEW default.perm_view AS SELECT * FROM default.src")).isSuccess) assume(supported, s"view support for '$format' has not been implemented yet") withCleanTmpResources(Seq(("default.perm_view", "view"))) { checkAnswer( - "perm_view_user", - "SELECT value1, value2 FROM default.src where key < 20", + permViewUser, + "SELECT value1, value2 FROM default.src where key = 1", Seq(Row(1, "hello"))) checkAnswer( - "perm_view_user", - "SELECT value1, value2 FROM default.perm_view where key < 20", + permViewUser, + "SELECT value1, value2 FROM default.perm_view where key = 1", Seq(Row(md5Hex("1"), "hello"))) } } + + // This test only includes a small subset of UCS-2 characters. + // But in theory, it should work for all characters + test("test MASK,MASK_SHOW_FIRST_4,MASK_SHOW_LAST_4 rule with non-English character set") { + val s1 = s"SELECT * FROM default.src where key = 10" + val s2 = s"SELECT * FROM default.src where key = 11" + // scalastyle:off + checkAnswer( + bob, + s1, + Seq(Row( + 10, + md5Hex("4"), + "xxxxxUXXXXUnnnUUUUUUXUUUUUUUUUUUUU", + "hellxUXXXXUnnnUUUUUUXUUUUUUUUUUUUU", + Timestamp.valueOf("2018-01-01 00:00:00"), + "xxxxxUXXXXUnnnUUUUUUXUUUUUUUUUア叶葉엽"))) + checkAnswer( + bob, + s2, + Seq(Row( + 11, + md5Hex("5"), + "XUUUUUUUUUUUUUUxxxxxUXXXXUnnnUUUUU", + "AßþΔUUUUUUUUUUUxxxxxUXXXXUnnnUUUUU", + Timestamp.valueOf("2018-01-01 00:00:00"), + "XUUUUUUUUUUUUUUxxxxxUXXXXUnnnU~!@#"))) + // scalastyle:on + } + } diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveHiveParquetSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveHiveParquetSuite.scala new file mode 100644 index 00000000000..142a2f82508 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveHiveParquetSuite.scala @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +class RowFilteringForHiveHiveParquetSuite extends RowFilteringTestBase { + override protected val catalogImpl: String = "hive" + override protected def format: String = "USING hive OPTIONS(fileFormat='parquet')" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveParquetSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveParquetSuite.scala new file mode 100644 index 00000000000..9727643cf93 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForHiveParquetSuite.scala @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +class RowFilteringForHiveParquetSuite extends RowFilteringTestBase { + override protected val catalogImpl: String = "hive" + override protected def format: String = "USING parquet" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForIcebergSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForIcebergSuite.scala new file mode 100644 index 00000000000..a93a69662e5 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForIcebergSuite.scala @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +import org.apache.spark.SparkConf +import org.scalatest.Outcome + +import org.apache.kyuubi.Utils +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ + +class RowFilteringForIcebergSuite extends RowFilteringTestBase { + override protected val extraSparkConf: SparkConf = { + val conf = new SparkConf() + + if (isSparkV31OrGreater) { + conf + .set("spark.sql.defaultCatalog", "testcat") + .set( + "spark.sql.catalog.testcat", + "org.apache.iceberg.spark.SparkCatalog") + .set(s"spark.sql.catalog.testcat.type", "hadoop") + .set( + "spark.sql.catalog.testcat.warehouse", + Utils.createTempDir("iceberg-hadoop").toString) + } + conf + + } + + override protected val catalogImpl: String = "in-memory" + + override protected def format: String = "USING iceberg" + + override def beforeAll(): Unit = { + if (isSparkV31OrGreater) { + super.beforeAll() + } + } + + override def afterAll(): Unit = { + if (isSparkV31OrGreater) { + super.afterAll() + } + } + + override def withFixture(test: NoArgTest): Outcome = { + assume(isSparkV31OrGreater) + test() + } +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForInMemoryParquetSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForInMemoryParquetSuite.scala new file mode 100644 index 00000000000..9baaa2a3166 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForInMemoryParquetSuite.scala @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +class RowFilteringForInMemoryParquetSuite extends RowFilteringTestBase { + + override protected val catalogImpl: String = "in-memory" + override protected def format: String = "USING parquet" +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForJDBCV2Suite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForJDBCV2Suite.scala new file mode 100644 index 00000000000..09ae6a008b5 --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringForJDBCV2Suite.scala @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +import java.sql.DriverManager + +import scala.util.Try + +import org.apache.spark.SparkConf +import org.scalatest.Outcome + +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ + +class RowFilteringForJDBCV2Suite extends RowFilteringTestBase { + override protected val extraSparkConf: SparkConf = { + val conf = new SparkConf() + if (isSparkV31OrGreater) { + conf + .set("spark.sql.defaultCatalog", "testcat") + .set( + "spark.sql.catalog.testcat", + "org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog") + .set(s"spark.sql.catalog.testcat.url", "jdbc:derby:memory:testcat;create=true") + .set( + s"spark.sql.catalog.testcat.driver", + "org.apache.derby.jdbc.AutoloadedDriver") + } + conf + } + + override protected val catalogImpl: String = "in-memory" + + override protected def format: String = "" + + override def beforeAll(): Unit = { + if (isSparkV31OrGreater) super.beforeAll() + } + + override def afterAll(): Unit = { + if (isSparkV31OrGreater) { + super.afterAll() + // cleanup db + Try { + DriverManager.getConnection(s"jdbc:derby:memory:testcat;shutdown=true") + } + } + } + + override def withFixture(test: NoArgTest): Outcome = { + assume(isSparkV31OrGreater) + test() + } +} diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala new file mode 100644 index 00000000000..8d9561a897e --- /dev/null +++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.plugin.spark.authz.ranger.rowfiltering + +// scalastyle:off +import scala.util.Try + +import org.apache.spark.sql.{Row, SparkSessionExtensions} +import org.scalatest.BeforeAndAfterAll +import org.scalatest.funsuite.AnyFunSuite + +import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._ +import org.apache.kyuubi.plugin.spark.authz.SparkSessionProvider +import org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension +import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._ + +/** + * Base trait for row filtering tests, derivative classes shall name themselves following: + * RowFilteringFor CatalogImpl? FileFormat? Additions? Suite + */ +trait RowFilteringTestBase extends AnyFunSuite with SparkSessionProvider with BeforeAndAfterAll { +// scalastyle:on + override protected val extension: SparkSessionExtensions => Unit = new RangerSparkExtension + + private def setup(): Unit = { + sql(s"CREATE TABLE IF NOT EXISTS default.src(key int, value int) $format") + sql("INSERT INTO default.src SELECT 1, 1") + sql("INSERT INTO default.src SELECT 20, 2") + sql("INSERT INTO default.src SELECT 30, 3") + } + + private def cleanup(): Unit = { + sql("DROP TABLE IF EXISTS default.src") + } + + override def beforeAll(): Unit = { + doAs(admin, setup()) + super.beforeAll() + } + override def afterAll(): Unit = { + doAs(admin, cleanup()) + spark.stop + super.afterAll() + } + + test("user without row filtering rule") { + checkAnswer( + kent, + "SELECT key FROM default.src order by key", + Seq(Row(1), Row(20), Row(30))) + } + + test("simple query projecting filtering column") { + checkAnswer(bob, "SELECT key FROM default.src", Seq(Row(1))) + } + + test("simple query projecting non filtering column") { + checkAnswer(bob, "SELECT value FROM default.src", Seq(Row(1))) + } + + test("simple query projecting non filtering column with udf max") { + checkAnswer(bob, "SELECT max(value) FROM default.src", Seq(Row(1))) + } + + test("simple query projecting non filtering column with udf coalesce") { + checkAnswer(bob, "SELECT coalesce(max(value), 1) FROM default.src", Seq(Row(1))) + } + + test("in subquery") { + checkAnswer( + bob, + "SELECT value FROM default.src WHERE value in (SELECT value as key FROM default.src)", + Seq(Row(1))) + } + + test("ctas") { + withCleanTmpResources(Seq(("default.src2", "table"))) { + doAs(bob, sql(s"CREATE TABLE default.src2 $format AS SELECT value FROM default.src")) + val query = "select value from default.src2" + checkAnswer(admin, query, Seq(Row(1))) + checkAnswer(bob, query, Seq(Row(1))) + } + } + + test("[KYUUBI #3581]: row level filter on permanent view") { + assume(isSparkV31OrGreater) + val supported = doAs( + permViewUser, + Try(sql("CREATE OR REPLACE VIEW default.perm_view AS SELECT * FROM default.src")).isSuccess) + assume(supported, s"view support for '$format' has not been implemented yet") + + withCleanTmpResources(Seq((s"default.perm_view", "view"))) { + checkAnswer( + admin, + "SELECT key FROM default.perm_view order by key", + Seq(Row(1), Row(20), Row(30))) + checkAnswer(bob, "SELECT key FROM default.perm_view", Seq(Row(1))) + checkAnswer(bob, "SELECT value FROM default.perm_view", Seq(Row(1))) + checkAnswer(bob, "SELECT max(value) FROM default.perm_view", Seq(Row(1))) + checkAnswer(bob, "SELECT coalesce(max(value), 1) FROM default.perm_view", Seq(Row(1))) + checkAnswer( + bob, + "SELECT value FROM default.perm_view WHERE value in " + + "(SELECT value as key FROM default.perm_view)", + Seq(Row(1))) + } + } +} diff
--git a/extensions/spark/kyuubi-spark-connector-common/pom.xml b/extensions/spark/kyuubi-spark-connector-common/pom.xml index 1cba0ccdd4b..1fc0f57684e 100644 --- a/extensions/spark/kyuubi-spark-connector-common/pom.xml +++ b/extensions/spark/kyuubi-spark-connector-common/pom.xml @@ -21,16 +21,22 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-spark-connector-common_2.12 + kyuubi-spark-connector-common_${scala.binary.version} jar Kyuubi Spark Connector Common https://kyuubi.apache.org/ + + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + + org.scala-lang scala-library @@ -87,10 +93,21 @@ scalacheck-1-17_${scala.binary.version} test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + + org.apache.logging.log4j + log4j-slf4j-impl + test + - org.apache.maven.plugins diff --git a/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SemanticVersion.scala b/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SemanticVersion.scala deleted file mode 100644 index 200937ca664..00000000000 --- a/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SemanticVersion.scala +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.spark.connector.common - -/** - * Encapsulate a component Spark version for the convenience of version checks. 
- * Copy from org.apache.kyuubi.engine.ComponentVersion - */ -case class SemanticVersion(majorVersion: Int, minorVersion: Int) { - - def isVersionAtMost(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - (runtimeMajor < targetMajor) || { - runtimeMajor == targetMajor && runtimeMinor <= targetMinor - }) - } - - def isVersionAtLeast(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - (runtimeMajor > targetMajor) || { - runtimeMajor == targetMajor && runtimeMinor >= targetMinor - }) - } - - def isVersionEqualTo(targetVersionString: String): Boolean = { - this.compareVersion( - targetVersionString, - (targetMajor: Int, targetMinor: Int, runtimeMajor: Int, runtimeMinor: Int) => - runtimeMajor == targetMajor && runtimeMinor == targetMinor) - } - - def compareVersion( - targetVersionString: String, - callback: (Int, Int, Int, Int) => Boolean): Boolean = { - val targetVersion = SemanticVersion(targetVersionString) - val targetMajor = targetVersion.majorVersion - val targetMinor = targetVersion.minorVersion - callback(targetMajor, targetMinor, this.majorVersion, this.minorVersion) - } - - override def toString: String = s"$majorVersion.$minorVersion" -} - -object SemanticVersion { - - def apply(versionString: String): SemanticVersion = { - """^(\d+)\.(\d+)(\..*)?$""".r.findFirstMatchIn(versionString) match { - case Some(m) => - SemanticVersion(m.group(1).toInt, m.group(2).toInt) - case None => - throw new IllegalArgumentException(s"Tried to parse '$versionString' as a project" + - s" version string, but it could not find the major and minor version numbers.") - } - } -} diff --git a/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SparkUtils.scala b/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SparkUtils.scala index c1a659fbf6e..fcb99ebe6a9 100644 --- a/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SparkUtils.scala +++ b/extensions/spark/kyuubi-spark-connector-common/src/main/scala/org/apache/kyuubi/spark/connector/common/SparkUtils.scala @@ -19,17 +19,8 @@ package org.apache.kyuubi.spark.connector.common import org.apache.spark.SPARK_VERSION -object SparkUtils { - - def isSparkVersionAtMost(targetVersionString: String): Boolean = { - SemanticVersion(SPARK_VERSION).isVersionAtMost(targetVersionString) - } +import org.apache.kyuubi.util.SemanticVersion - def isSparkVersionAtLeast(targetVersionString: String): Boolean = { - SemanticVersion(SPARK_VERSION).isVersionAtLeast(targetVersionString) - } - - def isSparkVersionEqualTo(targetVersionString: String): Boolean = { - SemanticVersion(SPARK_VERSION).isVersionEqualTo(targetVersionString) - } +object SparkUtils { + lazy val SPARK_RUNTIME_VERSION: SemanticVersion = SemanticVersion(SPARK_VERSION) } diff --git a/extensions/spark/kyuubi-spark-connector-hive/pom.xml b/extensions/spark/kyuubi-spark-connector-hive/pom.xml index 37e3d840957..4f46138e904 100644 --- a/extensions/spark/kyuubi-spark-connector-hive/pom.xml +++ b/extensions/spark/kyuubi-spark-connector-hive/pom.xml @@ -21,18 +21,17 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../../pom.xml - kyuubi-spark-connector-hive_2.12 + 
kyuubi-spark-connector-hive_${scala.binary.version} jar Kyuubi Spark Hive Connector A Kyuubi hive connector based on Spark V2 DataSource https://kyuubi.apache.org/ - org.apache.kyuubi kyuubi-spark-connector-common_${scala.binary.version} @@ -40,34 +39,34 @@ - org.apache.kyuubi - kyuubi-spark-connector-common_${scala.binary.version} - ${project.version} - test-jar - test + com.google.guava + guava - org.scala-lang - scala-library + org.apache.spark + spark-hive_${scala.binary.version} provided - org.slf4j - slf4j-api + org.apache.hadoop + hadoop-client-api provided - org.apache.spark - spark-sql_${scala.binary.version} - provided + org.apache.kyuubi + kyuubi-spark-connector-common_${scala.binary.version} + ${project.version} + test-jar + test - com.google.guava - guava + org.scalatestplus + scalacheck-1-17_${scala.binary.version} + test @@ -84,17 +83,6 @@ test - - org.apache.spark - spark-hive_${scala.binary.version} - - - - org.scalatestplus - scalacheck-1-17_${scala.binary.version} - test - - org.apache.spark spark-sql_${scala.binary.version} @@ -117,15 +105,10 @@ test - - org.apache.hadoop - hadoop-client-api - - org.apache.hadoop hadoop-client-runtime - runtime + test + + 4.0.0 + + org.apache.kyuubi + kyuubi-parent + 1.9.0-SNAPSHOT + ../../pom.xml + + + kyuubi-chat-engine_${scala.binary.version} + jar + Kyuubi Project Engine Chat + https://kyuubi.apache.org/ + + + + + org.apache.kyuubi + kyuubi-common_${scala.binary.version} + ${project.version} + + + + org.apache.kyuubi + kyuubi-ha_${scala.binary.version} + ${project.version} + + + + com.theokanning.openai-gpt3-java + service + ${openai.java.version} + + + + org.apache.kyuubi + kyuubi-common_${scala.binary.version} + ${project.version} + test-jar + test + + + + org.apache.kyuubi + ${hive.jdbc.artifact} + ${project.version} + test + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + prepare-test-jar + + test-jar + + test-compile + + + + + target/scala-${scala.binary.version}/classes + target/scala-${scala.binary.version}/test-classes + + + diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatBackendService.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatBackendService.scala new file mode 100644 index 00000000000..fdc710e2ccd --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatBackendService.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kyuubi.engine.chat + +import org.apache.kyuubi.engine.chat.session.ChatSessionManager +import org.apache.kyuubi.service.AbstractBackendService +import org.apache.kyuubi.session.SessionManager + +class ChatBackendService + extends AbstractBackendService("ChatBackendService") { + + override val sessionManager: SessionManager = new ChatSessionManager() + +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatEngine.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatEngine.scala new file mode 100644 index 00000000000..c1fdea9538c --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatEngine.scala @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.engine.chat + +import ChatEngine.currentEngine + +import org.apache.kyuubi.{Logging, Utils} +import org.apache.kyuubi.Utils.{addShutdownHook, JDBC_ENGINE_SHUTDOWN_PRIORITY} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ZK_CONN_RETRY_POLICY +import org.apache.kyuubi.ha.client.RetryPolicies +import org.apache.kyuubi.service.Serverable +import org.apache.kyuubi.util.SignalRegister + +class ChatEngine extends Serverable("ChatEngine") { + + override val backendService = new ChatBackendService() + override val frontendServices = Seq(new ChatTBinaryFrontendService(this)) + + override def start(): Unit = { + super.start() + // Start engine self-terminating checker after all services are ready and it can be reached by + // all servers in engine spaces.
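+ // Once the session manager's terminating checker decides the engine should exit (for
+ // example, after staying idle with no open sessions), the callback below stops the engine.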
+ backendService.sessionManager.startTerminatingChecker(() => { + currentEngine.foreach(_.stop()) + }) + } + + override protected def stopServer(): Unit = {} +} + +object ChatEngine extends Logging { + + val kyuubiConf: KyuubiConf = KyuubiConf() + + var currentEngine: Option[ChatEngine] = None + + def startEngine(): Unit = { + currentEngine = Some(new ChatEngine()) + currentEngine.foreach { engine => + engine.initialize(kyuubiConf) + engine.start() + addShutdownHook( + () => { + engine.stop() + }, + JDBC_ENGINE_SHUTDOWN_PRIORITY + 1) + } + } + + def main(args: Array[String]): Unit = { + SignalRegister.registerLogger(logger) + + try { + Utils.fromCommandLineArgs(args, kyuubiConf) + kyuubiConf.setIfMissing(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + kyuubiConf.setIfMissing(HA_ZK_CONN_RETRY_POLICY, RetryPolicies.N_TIME.toString) + + startEngine() + } catch { + case t: Throwable if currentEngine.isDefined => + currentEngine.foreach { engine => + engine.stop() + } + error("Failed to create Chat Engine", t) + throw t + case t: Throwable => + error("Failed to create Chat Engine.", t) + throw t + } + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatTBinaryFrontendService.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatTBinaryFrontendService.scala new file mode 100644 index 00000000000..80702c97c3c --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/ChatTBinaryFrontendService.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.engine.chat + +import org.apache.kyuubi.ha.client.{EngineServiceDiscovery, ServiceDiscovery} +import org.apache.kyuubi.service.{Serverable, Service, TBinaryFrontendService} + +class ChatTBinaryFrontendService(override val serverable: Serverable) + extends TBinaryFrontendService("ChatTBinaryFrontend") { + + /** + * An optional `ServiceDiscovery` for [[FrontendService]] to expose itself + */ + override lazy val discoveryService: Option[Service] = + if (ServiceDiscovery.supportServiceDiscovery(conf)) { + Some(new EngineServiceDiscovery(this)) + } else { + None + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperation.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperation.scala new file mode 100644 index 00000000000..b0b1806f80c --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperation.scala @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.engine.chat.operation + +import org.apache.hive.service.rpc.thrift._ + +import org.apache.kyuubi.{KyuubiSQLException, Utils} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.engine.chat.schema.{RowSet, SchemaHelper} +import org.apache.kyuubi.operation.{AbstractOperation, FetchIterator, OperationState} +import org.apache.kyuubi.operation.FetchOrientation.{FETCH_FIRST, FETCH_NEXT, FETCH_PRIOR, FetchOrientation} +import org.apache.kyuubi.session.Session + +abstract class ChatOperation(session: Session) extends AbstractOperation(session) { + + protected var iter: FetchIterator[Array[String]] = _ + + protected lazy val conf: KyuubiConf = session.sessionManager.getConf + + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { + validateDefaultFetchOrientation(order) + assertState(OperationState.FINISHED) + setHasResultSet(true) + order match { + case FETCH_NEXT => + iter.fetchNext() + case FETCH_PRIOR => + iter.fetchPrior(rowSetSize) + case FETCH_FIRST => + iter.fetchAbsolute(0) + } + + val taken = iter.take(rowSetSize) + val resultRowSet = RowSet.toTRowSet(taken.toSeq, 1, getProtocolVersion) + resultRowSet.setStartRowOffset(iter.getPosition) + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(resultRowSet) + resp.setHasMoreRows(false) + resp + } + + override def cancel(): Unit = { + cleanup(OperationState.CANCELED) + } + + override def close(): Unit = { + cleanup(OperationState.CLOSED) + } + + protected def onError(cancel: Boolean = false): PartialFunction[Throwable, Unit] = { + // We should use Throwable instead of Exception since `java.lang.NoClassDefFoundError` + // could be thrown. 
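+ // The branches below distinguish three cases: a TIMEOUT operation surfaces a
+ // "Timeout operating ..." KyuubiSQLException, an operation already in a terminal state only
+ // records the exception and logs a warning, and any other state is moved to ERROR before
+ // a KyuubiSQLException is thrown.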
+ case e: Throwable => + withLockRequired { + val errMsg = Utils.stringifyException(e) + if (state == OperationState.TIMEOUT) { + val ke = KyuubiSQLException(s"Timeout operating $opType: $errMsg") + setOperationException(ke) + throw ke + } else if (isTerminalState(state)) { + setOperationException(KyuubiSQLException(errMsg)) + warn(s"Ignore exception in terminal state with $statementId: $errMsg") + } else { + error(s"Error operating $opType: $errMsg", e) + val ke = KyuubiSQLException(s"Error operating $opType: $errMsg", e) + setOperationException(ke) + setState(OperationState.ERROR) + throw ke + } + } + } + + override protected def beforeRun(): Unit = { + setState(OperationState.PENDING) + setHasResultSet(true) + } + + override protected def afterRun(): Unit = {} + + override def getResultSetMetadata: TGetResultSetMetadataResp = { + val tTableSchema = SchemaHelper.stringTTableSchema("reply") + val resp = new TGetResultSetMetadataResp + resp.setSchema(tTableSchema) + resp.setStatus(OK_STATUS) + resp + } + + override def shouldRunAsync: Boolean = false +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationManager.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationManager.scala new file mode 100644 index 00000000000..1e89165176e --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationManager.scala @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kyuubi.engine.chat.operation + +import java.util + +import org.apache.kyuubi.KyuubiSQLException +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.engine.chat.provider.ChatProvider +import org.apache.kyuubi.operation.{Operation, OperationManager} +import org.apache.kyuubi.session.Session + +class ChatOperationManager( + conf: KyuubiConf, + chatProvider: ChatProvider) extends OperationManager("ChatOperationManager") { + + override def newExecuteStatementOperation( + session: Session, + statement: String, + confOverlay: Map[String, String], + runAsync: Boolean, + queryTimeout: Long): Operation = { + val executeStatement = + new ExecuteStatement( + session, + statement, + runAsync, + queryTimeout, + chatProvider) + addOperation(executeStatement) + } + + override def newGetTypeInfoOperation(session: Session): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetCatalogsOperation(session: Session): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetSchemasOperation( + session: Session, + catalog: String, + schema: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetTablesOperation( + session: Session, + catalogName: String, + schemaName: String, + tableName: String, + tableTypes: util.List[String]): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetTableTypesOperation(session: Session): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetColumnsOperation( + session: Session, + catalogName: String, + schemaName: String, + tableName: String, + columnName: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetFunctionsOperation( + session: Session, + catalogName: String, + schemaName: String, + functionName: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetPrimaryKeysOperation( + session: Session, + catalogName: String, + schemaName: String, + tableName: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetCrossReferenceOperation( + session: Session, + primaryCatalog: String, + primarySchema: String, + primaryTable: String, + foreignCatalog: String, + foreignSchema: String, + foreignTable: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def getQueryId(operation: Operation): String = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newSetCurrentCatalogOperation(session: Session, catalog: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetCurrentCatalogOperation(session: Session): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newSetCurrentDatabaseOperation(session: Session, database: String): Operation = { + throw KyuubiSQLException.featureNotSupported() + } + + override def newGetCurrentDatabaseOperation(session: Session): Operation = { + throw KyuubiSQLException.featureNotSupported() + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ExecuteStatement.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ExecuteStatement.scala new file mode 100644 index 00000000000..754a519324f --- /dev/null +++ 
b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/operation/ExecuteStatement.scala @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kyuubi.engine.chat.operation + +import org.apache.kyuubi.Logging +import org.apache.kyuubi.engine.chat.provider.ChatProvider +import org.apache.kyuubi.operation.{ArrayFetchIterator, OperationState} +import org.apache.kyuubi.operation.log.OperationLog +import org.apache.kyuubi.session.Session + +class ExecuteStatement( + session: Session, + override val statement: String, + override val shouldRunAsync: Boolean, + queryTimeout: Long, + chatProvider: ChatProvider) + extends ChatOperation(session) with Logging { + + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + override def getOperationLog: Option[OperationLog] = Option(operationLog) + + override protected def runInternal(): Unit = { + addTimeoutMonitor(queryTimeout) + if (shouldRunAsync) { + val asyncOperation = new Runnable { + override def run(): Unit = { + executeStatement() + } + } + val chatSessionManager = session.sessionManager + val backgroundHandle = chatSessionManager.submitBackgroundOperation(asyncOperation) + setBackgroundHandle(backgroundHandle) + } else { + executeStatement() + } + } + + private def executeStatement(): Unit = { + setState(OperationState.RUNNING) + + try { + val reply = chatProvider.ask(session.handle.identifier.toString, statement) + iter = new ArrayFetchIterator(Array(Array(reply))) + + setState(OperationState.FINISHED) + } catch { + onError(true) + } finally { + shutdownTimeoutMonitor() + } + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala new file mode 100644 index 00000000000..aae8b488a5c --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.chat.provider + +import java.net.{InetSocketAddress, Proxy, URL} +import java.time.Duration +import java.util +import java.util.concurrent.TimeUnit + +import scala.collection.JavaConverters._ + +import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache} +import com.theokanning.openai.OpenAiApi +import com.theokanning.openai.completion.chat.{ChatCompletionRequest, ChatMessage, ChatMessageRole} +import com.theokanning.openai.service.OpenAiService +import com.theokanning.openai.service.OpenAiService.{defaultClient, defaultObjectMapper, defaultRetrofit} + +import org.apache.kyuubi.config.KyuubiConf + +class ChatGPTProvider(conf: KyuubiConf) extends ChatProvider { + + private val gptApiKey = conf.get(KyuubiConf.ENGINE_CHAT_GPT_API_KEY).getOrElse { + throw new IllegalArgumentException( + s"'${KyuubiConf.ENGINE_CHAT_GPT_API_KEY.key}' must be configured, " + + s"which could be got at https://platform.openai.com/account/api-keys") + } + + private val openAiService: OpenAiService = { + val builder = defaultClient( + gptApiKey, + Duration.ofMillis(conf.get(KyuubiConf.ENGINE_CHAT_GPT_HTTP_SOCKET_TIMEOUT))) + .newBuilder + .connectTimeout(Duration.ofMillis(conf.get(KyuubiConf.ENGINE_CHAT_GPT_HTTP_CONNECT_TIMEOUT))) + + conf.get(KyuubiConf.ENGINE_CHAT_GPT_HTTP_PROXY) match { + case Some(httpProxyUrl) => + val url = new URL(httpProxyUrl) + val proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(url.getHost, url.getPort)) + builder.proxy(proxy) + case _ => + } + + val retrofit = defaultRetrofit(builder.build(), defaultObjectMapper) + val api = retrofit.create(classOf[OpenAiApi]) + new OpenAiService(api) + } + + private var sessionUser: Option[String] = None + + private val chatHistory: LoadingCache[String, util.ArrayDeque[ChatMessage]] = + CacheBuilder.newBuilder() + .expireAfterWrite(10, TimeUnit.MINUTES) + .build(new CacheLoader[String, util.ArrayDeque[ChatMessage]] { + override def load(sessionId: String): util.ArrayDeque[ChatMessage] = + new util.ArrayDeque[ChatMessage] + }) + + override def open(sessionId: String, user: Option[String]): Unit = { + sessionUser = user + chatHistory.getIfPresent(sessionId) + } + + override def ask(sessionId: String, q: String): String = { + val messages = chatHistory.get(sessionId) + try { + messages.addLast(new ChatMessage(ChatMessageRole.USER.value(), q)) + val completionRequest = ChatCompletionRequest.builder() + .model(conf.get(KyuubiConf.ENGINE_CHAT_GPT_MODEL)) + .messages(messages.asScala.toList.asJava) + .user(sessionUser.orNull) + .n(1) + .build() + val responseText = openAiService.createChatCompletion(completionRequest) + .getChoices.get(0).getMessage.getContent + responseText + } catch { + case e: Throwable => + messages.removeLast() + s"Chat failed. Error: ${e.getMessage}" + } + } + + override def close(sessionId: String): Unit = { + chatHistory.invalidate(sessionId) + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala new file mode 100644 index 00000000000..06d7193805f --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.chat.provider + +import scala.util.control.NonFatal + +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.module.scala.{ClassTagExtensions, DefaultScalaModule} + +import org.apache.kyuubi.{KyuubiException, Logging} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.util.reflect.DynConstructors + +trait ChatProvider { + + def open(sessionId: String, user: Option[String] = None): Unit + + def ask(sessionId: String, q: String): String + + def close(sessionId: String): Unit +} + +object ChatProvider extends Logging { + + val mapper: ObjectMapper with ClassTagExtensions = + new ObjectMapper().registerModule(DefaultScalaModule) :: ClassTagExtensions + + def load(conf: KyuubiConf): ChatProvider = { + val groupProviderClass = conf.get(KyuubiConf.ENGINE_CHAT_PROVIDER) + try { + DynConstructors.builder(classOf[ChatProvider]) + .impl(groupProviderClass, classOf[KyuubiConf]) + .impl(groupProviderClass) + .buildChecked + .newInstanceChecked(conf) + } catch { + case _: ClassCastException => + throw new KyuubiException( + s"Class $groupProviderClass is not a child of '${classOf[ChatProvider].getName}'.") + case NonFatal(e) => + throw new IllegalArgumentException(s"Error while instantiating '$groupProviderClass': ", e) + } + } +} diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkHudiOperationSuite.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala similarity index 66% rename from externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkHudiOperationSuite.scala rename to externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala index c5e8be37aa4..1116ea785dc 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkHudiOperationSuite.scala +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala @@ -15,16 +15,14 @@ * limitations under the License. */ -package org.apache.kyuubi.engine.spark.operation +package org.apache.kyuubi.engine.chat.provider -import org.apache.kyuubi.engine.spark.WithSparkSQLEngine -import org.apache.kyuubi.operation.HudiMetadataTests -import org.apache.kyuubi.tags.HudiTest +class EchoProvider extends ChatProvider { -@HudiTest -class SparkHudiOperationSuite extends WithSparkSQLEngine with HudiMetadataTests { + override def open(sessionId: String, user: Option[String]): Unit = {} - override protected def jdbcUrl: String = getJdbcUrl + override def ask(sessionId: String, q: String): String = + "This is ChatKyuubi, nice to meet you!" 
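+ // Custom providers follow the same shape: ChatProvider.load instantiates the class named by
+ // KyuubiConf.ENGINE_CHAT_PROVIDER, preferring a constructor that accepts a KyuubiConf and
+ // falling back to a no-arg constructor. A hypothetical provider (the class name and reply
+ // below are illustrative only) could look like:
+ //   class StaticReplyProvider(conf: KyuubiConf) extends ChatProvider {
+ //     override def open(sessionId: String, user: Option[String]): Unit = {}
+ //     override def ask(sessionId: String, q: String): String = "static reply"
+ //     override def close(sessionId: String): Unit = {}
+ //   }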
- override def withKyuubiConf: Map[String, String] = extraConfigs + override def close(sessionId: String): Unit = {} } diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/Message.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/Message.scala new file mode 100644 index 00000000000..e2162be9f1a --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/Message.scala @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.chat.provider + +case class Message(role: String, content: String) diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/RowSet.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/RowSet.scala new file mode 100644 index 00000000000..3bb4ba7dfa9 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/RowSet.scala @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.chat.schema + +import java.util + +import org.apache.hive.service.rpc.thrift._ + +import org.apache.kyuubi.util.RowSetUtils._ + +object RowSet { + + def emptyTRowSet(): TRowSet = { + new TRowSet(0, new java.util.ArrayList[TRow](0)) + } + + def toTRowSet( + rows: Seq[Array[String]], + columnSize: Int, + protocolVersion: TProtocolVersion): TRowSet = { + if (protocolVersion.getValue < TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6.getValue) { + toRowBasedSet(rows, columnSize) + } else { + toColumnBasedSet(rows, columnSize) + } + } + + def toRowBasedSet(rows: Seq[Array[String]], columnSize: Int): TRowSet = { + val rowSize = rows.length + val tRows = new java.util.ArrayList[TRow](rowSize) + var i = 0 + while (i < rowSize) { + val row = rows(i) + val tRow = new TRow() + var j = 0 + val columnSize = row.length + while (j < columnSize) { + val columnValue = stringTColumnValue(j, row) + tRow.addToColVals(columnValue) + j += 1 + } + i += 1 + tRows.add(tRow) + } + new TRowSet(0, tRows) + } + + def toColumnBasedSet(rows: Seq[Array[String]], columnSize: Int): TRowSet = { + val rowSize = rows.length + val tRowSet = new TRowSet(0, new util.ArrayList[TRow](rowSize)) + var i = 0 + while (i < columnSize) { + val tColumn = toTColumn(rows, i) + tRowSet.addToColumns(tColumn) + i += 1 + } + tRowSet + } + + private def toTColumn(rows: Seq[Array[String]], ordinal: Int): TColumn = { + val nulls = new java.util.BitSet() + val values = getOrSetAsNull[String](rows, ordinal, nulls, "") + TColumn.stringVal(new TStringColumn(values, nulls)) + } + + private def getOrSetAsNull[String]( + rows: Seq[Array[String]], + ordinal: Int, + nulls: util.BitSet, + defaultVal: String): util.List[String] = { + val size = rows.length + val ret = new util.ArrayList[String](size) + var idx = 0 + while (idx < size) { + val row = rows(idx) + val isNull = row(ordinal) == null + if (isNull) { + nulls.set(idx, true) + ret.add(idx, defaultVal) + } else { + ret.add(idx, row(ordinal)) + } + idx += 1 + } + ret + } + + private def stringTColumnValue(ordinal: Int, row: Array[String]): TColumnValue = { + val tStringValue = new TStringValue + if (row(ordinal) != null) tStringValue.setValue(row(ordinal)) + TColumnValue.stringVal(tStringValue) + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/SchemaHelper.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/SchemaHelper.scala new file mode 100644 index 00000000000..8ccfdda2fe9 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/schema/SchemaHelper.scala @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.chat.schema + +import java.util.Collections + +import org.apache.hive.service.rpc.thrift._ + +object SchemaHelper { + + def stringTTypeQualifiers: TTypeQualifiers = { + val ret = new TTypeQualifiers() + val qualifiers = Collections.emptyMap[String, TTypeQualifierValue]() + ret.setQualifiers(qualifiers) + ret + } + + def stringTTypeDesc: TTypeDesc = { + val typeEntry = new TPrimitiveTypeEntry(TTypeId.STRING_TYPE) + typeEntry.setTypeQualifiers(stringTTypeQualifiers) + val tTypeDesc = new TTypeDesc() + tTypeDesc.addToTypes(TTypeEntry.primitiveEntry(typeEntry)) + tTypeDesc + } + + def stringTColumnDesc(fieldName: String, pos: Int): TColumnDesc = { + val tColumnDesc = new TColumnDesc() + tColumnDesc.setColumnName(fieldName) + tColumnDesc.setTypeDesc(stringTTypeDesc) + tColumnDesc.setPosition(pos) + tColumnDesc + } + + def stringTTableSchema(fieldsName: String*): TTableSchema = { + val tTableSchema = new TTableSchema() + fieldsName.zipWithIndex.foreach { case (f, i) => + tTableSchema.addToColumns(stringTColumnDesc(f, i)) + } + tTableSchema + } +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala new file mode 100644 index 00000000000..6ec6d062600 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kyuubi.engine.chat.session + +import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion} + +import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiSQLException} +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY +import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} + +class ChatSessionImpl( + protocol: TProtocolVersion, + user: String, + password: String, + ipAddress: String, + conf: Map[String, String], + sessionManager: SessionManager) + extends AbstractSession(protocol, user, password, ipAddress, conf, sessionManager) { + + override val handle: SessionHandle = + conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).getOrElse(SessionHandle()) + + private val chatProvider = sessionManager.asInstanceOf[ChatSessionManager].chatProvider + + override def open(): Unit = { + info(s"Starting to open chat session.") + chatProvider.open(handle.identifier.toString, Some(user)) + super.open() + info(s"The chat session is started.") + } + + override def getInfo(infoType: TGetInfoType): TGetInfoValue = withAcquireRelease() { + infoType match { + case TGetInfoType.CLI_SERVER_NAME | TGetInfoType.CLI_DBMS_NAME => + TGetInfoValue.stringValue("Kyuubi Chat Engine") + case TGetInfoType.CLI_DBMS_VER => + TGetInfoValue.stringValue(KYUUBI_VERSION) + case TGetInfoType.CLI_ODBC_KEYWORDS => TGetInfoValue.stringValue("Unimplemented") + case TGetInfoType.CLI_MAX_COLUMN_NAME_LEN => + TGetInfoValue.lenValue(128) + case TGetInfoType.CLI_MAX_SCHEMA_NAME_LEN => + TGetInfoValue.lenValue(128) + case TGetInfoType.CLI_MAX_TABLE_NAME_LEN => + TGetInfoValue.lenValue(128) + case _ => throw KyuubiSQLException(s"Unrecognized GetInfoType value: $infoType") + } + } + + override def close(): Unit = { + chatProvider.close(handle.identifier.toString) + super.close() + } + +} diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionManager.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionManager.scala new file mode 100644 index 00000000000..33a9dd45066 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionManager.scala @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kyuubi.engine.chat.session + +import org.apache.hive.service.rpc.thrift.TProtocolVersion + +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.ENGINE_SHARE_LEVEL +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY +import org.apache.kyuubi.engine.ShareLevel +import org.apache.kyuubi.engine.chat.ChatEngine +import org.apache.kyuubi.engine.chat.operation.ChatOperationManager +import org.apache.kyuubi.engine.chat.provider.ChatProvider +import org.apache.kyuubi.operation.OperationManager +import org.apache.kyuubi.session.{Session, SessionHandle, SessionManager} + +class ChatSessionManager(name: String) + extends SessionManager(name) { + + def this() = this(classOf[ChatSessionManager].getSimpleName) + + override protected def isServer: Boolean = false + + lazy val chatProvider: ChatProvider = ChatProvider.load(conf) + + override lazy val operationManager: OperationManager = + new ChatOperationManager(conf, chatProvider) + + override def initialize(conf: KyuubiConf): Unit = { + this.conf = conf + super.initialize(conf) + } + + override protected def createSession( + protocol: TProtocolVersion, + user: String, + password: String, + ipAddress: String, + conf: Map[String, String]): Session = { + conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID) + .flatMap(getSessionOption).getOrElse { + new ChatSessionImpl(protocol, user, password, ipAddress, conf, this) + } + } + + override def closeSession(sessionHandle: SessionHandle): Unit = { + super.closeSession(sessionHandle) + if (conf.get(ENGINE_SHARE_LEVEL) == ShareLevel.CONNECTION.toString) { + info("Session stopped due to shared level is Connection.") + stopSession() + } + } + + private def stopSession(): Unit = { + ChatEngine.currentEngine.foreach(_.stop()) + } +} diff --git a/externals/kyuubi-chat-engine/src/test/resources/log4j2-test.xml b/externals/kyuubi-chat-engine/src/test/resources/log4j2-test.xml new file mode 100644 index 00000000000..585a12c6f99 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/test/resources/log4j2-test.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/WithChatEngine.scala b/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/WithChatEngine.scala new file mode 100644 index 00000000000..287fdde2fb5 --- /dev/null +++ b/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/WithChatEngine.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kyuubi.engine.chat + +import org.apache.kyuubi.KyuubiFunSuite +import org.apache.kyuubi.config.KyuubiConf + +trait WithChatEngine extends KyuubiFunSuite { + + protected var engine: ChatEngine = _ + protected var connectionUrl: String = _ + + protected val kyuubiConf: KyuubiConf = ChatEngine.kyuubiConf + + def withKyuubiConf: Map[String, String] + + override def beforeAll(): Unit = { + super.beforeAll() + startChatEngine() + } + + override def afterAll(): Unit = { + stopChatEngine() + super.afterAll() + } + + def stopChatEngine(): Unit = { + if (engine != null) { + engine.stop() + engine = null + } + } + + def startChatEngine(): Unit = { + withKyuubiConf.foreach { case (k, v) => + System.setProperty(k, v) + kyuubiConf.set(k, v) + } + ChatEngine.startEngine() + engine = ChatEngine.currentEngine.get + connectionUrl = engine.frontendServices.head.connectionUrl + } + + protected def jdbcConnectionUrl: String = s"jdbc:hive2://$connectionUrl/;" + +} diff --git a/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationSuite.scala b/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationSuite.scala new file mode 100644 index 00000000000..b14407a267b --- /dev/null +++ b/externals/kyuubi-chat-engine/src/test/scala/org/apache/kyuubi/engine/chat/operation/ChatOperationSuite.scala @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.chat.operation + +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.engine.chat.WithChatEngine +import org.apache.kyuubi.operation.HiveJDBCTestHelper + +class ChatOperationSuite extends HiveJDBCTestHelper with WithChatEngine { + + override def withKyuubiConf: Map[String, String] = Map( + ENGINE_CHAT_PROVIDER.key -> "echo") + + override protected def jdbcUrl: String = jdbcConnectionUrl + + test("test echo chat provider") { + withJdbcStatement() { stmt => + val result = stmt.executeQuery("Hello, Kyuubi") + assert(result.next()) + val expected = "This is ChatKyuubi, nice to meet you!" 
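+ // ChatOperation.getResultSetMetadata declares a single string column named "reply", so the
+ // echoed answer is fetched back from that column.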
+ assert(result.getString("reply") === expected) + assert(!result.next()) + } + } +} diff --git a/externals/kyuubi-download/pom.xml b/externals/kyuubi-download/pom.xml index d7f0c601322..b21e3e5a223 100644 --- a/externals/kyuubi-download/pom.xml +++ b/externals/kyuubi-download/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml diff --git a/externals/kyuubi-flink-sql-engine/pom.xml b/externals/kyuubi-flink-sql-engine/pom.xml index 4d7167c1cb6..eec5c1cd9e8 100644 --- a/externals/kyuubi-flink-sql-engine/pom.xml +++ b/externals/kyuubi-flink-sql-engine/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-flink-sql-engine_2.12 + kyuubi-flink-sql-engine_${scala.binary.version} jar Kyuubi Project Engine Flink SQL https://kyuubi.apache.org/ @@ -59,55 +59,49 @@ org.apache.flink - flink-streaming-java${flink.module.scala.suffix} + flink-streaming-java provided org.apache.flink - flink-clients${flink.module.scala.suffix} + flink-clients provided org.apache.flink - flink-sql-client${flink.module.scala.suffix} + flink-sql-client provided org.apache.flink - flink-table-common - provided - - - - org.apache.flink - flink-table-api-java + flink-sql-gateway provided org.apache.flink - flink-table-api-java-bridge${flink.module.scala.suffix} + flink-table-common provided org.apache.flink - flink-table-planner_${scala.binary.version} + flink-table-api-java provided org.apache.flink - flink-table-runtime${flink.module.scala.suffix} + flink-table-api-java-bridge provided org.apache.flink - flink-sql-parser + flink-table-runtime provided @@ -126,9 +120,47 @@ ${project.version} test + + + org.apache.kyuubi + kyuubi-zookeeper_${scala.binary.version} + ${project.version} + test + + org.apache.flink - flink-test-utils${flink.module.scala.suffix} + flink-test-utils + test + + + + org.apache.hadoop + hadoop-client-minicluster + test + + + + org.bouncycastle + bcprov-jdk15on + test + + + + org.bouncycastle + bcpkix-jdk15on + test + + + + jakarta.activation + jakarta.activation-api + test + + + + jakarta.xml.bind + jakarta.xml.bind-api test @@ -142,20 +174,15 @@ false - org.apache.kyuubi:kyuubi-common_${scala.binary.version} - org.apache.kyuubi:kyuubi-ha_${scala.binary.version} com.fasterxml.jackson.core:* com.fasterxml.jackson.module:* com.google.guava:failureaccess com.google.guava:guava commons-codec:commons-codec org.apache.commons:commons-lang3 - org.apache.curator:curator-client - org.apache.curator:curator-framework - org.apache.curator:curator-recipes org.apache.hive:hive-service-rpc org.apache.thrift:* - org.apache.zookeeper:* + org.apache.kyuubi:* @@ -184,13 +211,6 @@ com.fasterxml.jackson.** - - org.apache.curator - ${kyuubi.shade.packageName}.org.apache.curator - - org.apache.curator.** - - com.google.common ${kyuubi.shade.packageName}.com.google.common @@ -234,20 +254,6 @@ org.apache.thrift.** - - org.apache.jute - ${kyuubi.shade.packageName}.org.apache.jute - - org.apache.jute.** - - - - org.apache.zookeeper - ${kyuubi.shade.packageName}.org.apache.zookeeper - - org.apache.zookeeper.** - - diff --git a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java new file mode 100644 index 00000000000..558db74a372 --- /dev/null +++ 
b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements.  See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.  You may obtain a copy of the License at + * + *    http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.client.deployment.application.executors; + +import static org.apache.flink.util.Preconditions.checkNotNull; +import static org.apache.flink.util.Preconditions.checkState; + +import java.util.Collection; +import java.util.concurrent.ConcurrentLinkedQueue; +import org.apache.flink.annotation.Internal; +import org.apache.flink.api.common.JobID; +import org.apache.flink.api.common.time.Time; +import org.apache.flink.client.cli.ClientOptions; +import org.apache.flink.client.deployment.application.EmbeddedJobClient; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.DeploymentOptions; +import org.apache.flink.core.execution.PipelineExecutor; +import org.apache.flink.core.execution.PipelineExecutorFactory; +import org.apache.flink.runtime.dispatcher.DispatcherGateway; +import org.apache.flink.util.concurrent.ScheduledExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Copied from Apache Flink to expose the DispatcherGateway for Kyuubi statements. */ +@Internal +public class EmbeddedExecutorFactory implements PipelineExecutorFactory { + + private static Collection bootstrapJobIds; + + private static Collection submittedJobIds; + + private static DispatcherGateway dispatcherGateway; + + private static ScheduledExecutor retryExecutor; + + private static final Object bootstrapLock = new Object(); + + private static final long BOOTSTRAP_WAIT_INTERVAL = 10_000L; + + private static final int BOOTSTRAP_WAIT_RETRIES = 3; + + private static final Logger LOGGER = LoggerFactory.getLogger(EmbeddedExecutorFactory.class); + + public EmbeddedExecutorFactory() { + LOGGER.debug( + "{} loaded in thread {} with classloader {}.", + this.getClass().getCanonicalName(), + Thread.currentThread().getName(), + this.getClass().getClassLoader().toString()); + } + + /** + * Creates an {@link EmbeddedExecutorFactory}. + * + * @param submittedJobIds a list that is going to be filled with the job ids of the new jobs that + * will be submitted. This is essentially used to return the submitted job ids to the caller. + * @param dispatcherGateway the dispatcher of the cluster which is going to be used to submit + * jobs. 
+ */ + public EmbeddedExecutorFactory( + final Collection submittedJobIds, + final DispatcherGateway dispatcherGateway, + final ScheduledExecutor retryExecutor) { + // there should be only one instance of EmbeddedExecutorFactory + LOGGER.debug( + "{} initiated in thread {} with classloader {}.", + this.getClass().getCanonicalName(), + Thread.currentThread().getName(), + this.getClass().getClassLoader().toString()); + checkState(EmbeddedExecutorFactory.submittedJobIds == null); + checkState(EmbeddedExecutorFactory.dispatcherGateway == null); + checkState(EmbeddedExecutorFactory.retryExecutor == null); + synchronized (bootstrapLock) { + // the size of submittedJobIds would always be 1, because we create a new list to avoid concurrent access + // issues + LOGGER.debug("Bootstrapping EmbeddedExecutorFactory."); + EmbeddedExecutorFactory.submittedJobIds = + new ConcurrentLinkedQueue<>(checkNotNull(submittedJobIds)); + EmbeddedExecutorFactory.bootstrapJobIds = submittedJobIds; + EmbeddedExecutorFactory.dispatcherGateway = checkNotNull(dispatcherGateway); + EmbeddedExecutorFactory.retryExecutor = checkNotNull(retryExecutor); + bootstrapLock.notifyAll(); + } + } + + @Override + public String getName() { + return EmbeddedExecutor.NAME; + } + + @Override + public boolean isCompatibleWith(final Configuration configuration) { + // override Flink's implementation to allow usage in Kyuubi + LOGGER.debug("Matching execution target: {}", configuration.get(DeploymentOptions.TARGET)); + return configuration.get(DeploymentOptions.TARGET).equalsIgnoreCase("yarn-application") + && configuration.toMap().getOrDefault("yarn.tags", "").toLowerCase().contains("kyuubi"); + } + + @Override + public PipelineExecutor getExecutor(final Configuration configuration) { + checkNotNull(configuration); + Collection executorJobIDs; + synchronized (bootstrapLock) { + // wait in a loop to avoid spurious wakeups + int retry = 0; + while (bootstrapJobIds == null && retry < BOOTSTRAP_WAIT_RETRIES) { + try { + LOGGER.debug("Waiting for bootstrap to complete. Wait retries: {}.", retry); + bootstrapLock.wait(BOOTSTRAP_WAIT_INTERVAL); + retry++; + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted while waiting for bootstrap.", e); + } + } + if (bootstrapJobIds == null) { + throw new RuntimeException( + "Bootstrap of Flink SQL engine timed out after " + + BOOTSTRAP_WAIT_INTERVAL * BOOTSTRAP_WAIT_RETRIES + + " ms. Please check the engine log for more details."); + } + } + if (bootstrapJobIds.size() > 0) { + LOGGER.info("Submitting new Kyuubi job. 
Job submitted: {}.", submittedJobIds.size()); + executorJobIDs = submittedJobIds; + } else { + LOGGER.info("Bootstrapping Flink SQL engine with the initial SQL."); + executorJobIDs = bootstrapJobIds; + } + return new EmbeddedExecutor( + executorJobIDs, + dispatcherGateway, + (jobId, userCodeClassloader) -> { + final Time timeout = + Time.milliseconds(configuration.get(ClientOptions.CLIENT_TIMEOUT).toMillis()); + return new EmbeddedJobClient( + jobId, dispatcherGateway, retryExecutor, timeout, userCodeClassloader); + }); + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/Constants.java b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/Constants.java deleted file mode 100644 index b683eb76afa..00000000000 --- a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/Constants.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.engine.flink.result; - -/** Constant column names. */ -public class Constants { - - public static final String TABLE_TYPE = "TABLE"; - public static final String VIEW_TYPE = "VIEW"; - - public static final String[] SUPPORTED_TABLE_TYPES = new String[] {TABLE_TYPE, VIEW_TYPE}; -} diff --git a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/ResultSet.java b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/ResultSet.java deleted file mode 100644 index 66f03a159b9..00000000000 --- a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/kyuubi/engine/flink/result/ResultSet.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kyuubi.engine.flink.result; - -import com.google.common.collect.Iterators; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import javax.annotation.Nullable; -import org.apache.flink.table.api.ResultKind; -import org.apache.flink.table.api.TableResult; -import org.apache.flink.table.catalog.Column; -import org.apache.flink.table.catalog.ResolvedSchema; -import org.apache.flink.types.Row; -import org.apache.flink.util.Preconditions; -import org.apache.kyuubi.operation.ArrayFetchIterator; -import org.apache.kyuubi.operation.FetchIterator; - -/** - * A set of one statement execution result containing result kind, columns, rows of data and change - * flags for streaming mode. - */ -public class ResultSet { - - private final ResultKind resultKind; - private final List columns; - private final FetchIterator data; - - // null in batch mode - // - // list of boolean in streaming mode, - // true if the corresponding row is an append row, false if its a retract row - private final List changeFlags; - - private ResultSet( - ResultKind resultKind, - List columns, - FetchIterator data, - @Nullable List changeFlags) { - this.resultKind = Preconditions.checkNotNull(resultKind, "resultKind must not be null"); - this.columns = Preconditions.checkNotNull(columns, "columns must not be null"); - this.data = Preconditions.checkNotNull(data, "data must not be null"); - this.changeFlags = changeFlags; - if (changeFlags != null) { - Preconditions.checkArgument( - Iterators.size((Iterator) data) == changeFlags.size(), - "the size of data and the size of changeFlags should be equal"); - } - } - - public List getColumns() { - return columns; - } - - public FetchIterator getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ResultSet resultSet = (ResultSet) o; - return resultKind.equals(resultSet.resultKind) - && columns.equals(resultSet.columns) - && data.equals(resultSet.data) - && Objects.equals(changeFlags, resultSet.changeFlags); - } - - @Override - public int hashCode() { - return Objects.hash(resultKind, columns, data, changeFlags); - } - - @Override - public String toString() { - return "ResultSet{" - + "resultKind=" - + resultKind - + ", columns=" - + columns - + ", data=" - + data - + ", changeFlags=" - + changeFlags - + '}'; - } - - public static ResultSet fromTableResult(TableResult tableResult) { - ResolvedSchema schema = tableResult.getResolvedSchema(); - // collect all rows from table result as list - // this is ok as TableResult contains limited rows - List rows = new ArrayList<>(); - tableResult.collect().forEachRemaining(rows::add); - return builder() - .resultKind(tableResult.getResultKind()) - .columns(schema.getColumns()) - .data(rows.toArray(new Row[0])) - .build(); - } - - public static Builder builder() { - return new Builder(); - } - - /** Builder for {@link ResultSet}. */ - public static class Builder { - private ResultKind resultKind = null; - private List columns = null; - private FetchIterator data = null; - private List changeFlags = null; - - private Builder() {} - - /** Set {@link ResultKind}. */ - public Builder resultKind(ResultKind resultKind) { - this.resultKind = resultKind; - return this; - } - - /** Set columns. */ - public Builder columns(Column... 
columns) { - this.columns = Arrays.asList(columns); - return this; - } - - /** Set columns. */ - public Builder columns(List columns) { - this.columns = columns; - return this; - } - - /** Set data. */ - public Builder data(FetchIterator data) { - this.data = data; - return this; - } - - /** Set data. */ - public Builder data(Row[] data) { - this.data = new ArrayFetchIterator<>(data); - return this; - } - - /** Set change flags. */ - public Builder changeFlags(List changeFlags) { - this.changeFlags = changeFlags; - return this; - } - - /** Returns a {@link ResultSet} instance. */ - public ResultSet build() { - return new ResultSet(resultKind, columns, data, changeFlags); - } - } -} diff --git a/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory b/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory new file mode 100644 index 00000000000..c394c07a7ba --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +org.apache.flink.client.deployment.application.executors.EmbeddedExecutorFactory \ No newline at end of file diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala index e271944a7c0..7d42aae8c87 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala @@ -18,33 +18,43 @@ package org.apache.kyuubi.engine.flink import java.io.File +import java.lang.{Boolean => JBoolean} import java.net.URL +import java.util.{ArrayList => JArrayList, Collections => JCollections, List => JList} import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ -import org.apache.commons.cli.{CommandLine, DefaultParser, Option, Options, ParseException} +import org.apache.commons.cli.{CommandLine, DefaultParser, Options} +import org.apache.flink.api.common.JobID +import org.apache.flink.client.cli.{CustomCommandLine, DefaultCLI, GenericCLI} +import org.apache.flink.configuration.Configuration import org.apache.flink.core.fs.Path import org.apache.flink.runtime.util.EnvironmentInformation import org.apache.flink.table.client.SqlClientException -import org.apache.flink.table.client.cli.CliOptions +import org.apache.flink.table.client.cli.CliOptionsParser import org.apache.flink.table.client.cli.CliOptionsParser._ -import org.apache.flink.table.client.gateway.context.SessionContext -import org.apache.flink.table.client.gateway.local.LocalExecutor +import org.apache.flink.table.gateway.service.context.{DefaultContext, SessionContext} +import org.apache.flink.table.gateway.service.result.ResultFetcher +import org.apache.flink.table.gateway.service.session.Session +import org.apache.flink.util.JarUtils -import org.apache.kyuubi.Logging -import org.apache.kyuubi.engine.SemanticVersion +import org.apache.kyuubi.{KyuubiException, Logging} +import org.apache.kyuubi.util.SemanticVersion +import org.apache.kyuubi.util.reflect._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ object FlinkEngineUtils extends Logging { - val MODE_EMBEDDED = "embedded" - val EMBEDDED_MODE_CLIENT_OPTIONS: Options = getEmbeddedModeClientOptions(new Options); + val EMBEDDED_MODE_CLIENT_OPTIONS: Options = getEmbeddedModeClientOptions(new Options) - val SUPPORTED_FLINK_VERSIONS: Array[SemanticVersion] = - Array("1.14", "1.15", "1.16").map(SemanticVersion.apply) + private def SUPPORTED_FLINK_VERSIONS = Set("1.16", "1.17").map(SemanticVersion.apply) + + val FLINK_RUNTIME_VERSION: SemanticVersion = SemanticVersion(EnvironmentInformation.getVersion) def checkFlinkVersion(): Unit = { val flinkVersion = EnvironmentInformation.getVersion - if (SUPPORTED_FLINK_VERSIONS.contains(SemanticVersion(flinkVersion))) { + if (SUPPORTED_FLINK_VERSIONS.contains(FLINK_RUNTIME_VERSION)) { info(s"The current Flink version is $flinkVersion") } else { throw new UnsupportedOperationException( @@ -53,56 +63,90 @@ object FlinkEngineUtils extends Logging { } } - def isFlinkVersionAtMost(targetVersionString: String): Boolean = - SemanticVersion(EnvironmentInformation.getVersion).isVersionAtMost(targetVersionString) - - def isFlinkVersionAtLeast(targetVersionString: String): Boolean = - SemanticVersion(EnvironmentInformation.getVersion).isVersionAtLeast(targetVersionString) - - def 
isFlinkVersionEqualTo(targetVersionString: String): Boolean = - SemanticVersion(EnvironmentInformation.getVersion).isVersionEqualTo(targetVersionString) - - def parseCliOptions(args: Array[String]): CliOptions = { - val (mode, modeArgs) = - if (args.isEmpty || args(0).startsWith("-")) (MODE_EMBEDDED, args) - else (args(0), args.drop(1)) - val options = parseEmbeddedModeClient(modeArgs) - if (mode == MODE_EMBEDDED) { - if (options.isPrintHelp) { - printHelpEmbeddedModeClient() + /** + * Copied and modified from [[org.apache.flink.table.client.cli.CliOptionsParser]] + * to avoid loading flink-python classes which we don't support yet. + */ + private def discoverDependencies( + jars: JList[URL], + libraries: JList[URL]): JList[URL] = { + val dependencies: JList[URL] = new JArrayList[URL] + try { // find jar files + for (url <- jars) { + JarUtils.checkJarFile(url) + dependencies.add(url) } - options - } else { - throw new SqlClientException("Other mode is not supported yet.") + // find jar files in library directories + libraries.foreach { libUrl => + val dir: File = new File(libUrl.toURI) + if (!dir.isDirectory) throw new SqlClientException(s"Directory expected: $dir") + if (!dir.canRead) throw new SqlClientException(s"Directory cannot be read: $dir") + val files: Array[File] = dir.listFiles + if (files == null) throw new SqlClientException(s"Directory cannot be read: $dir") + files.filter { f => f.isFile && f.getAbsolutePath.toLowerCase.endsWith(".jar") } + .foreach { f => + val url: URL = f.toURI.toURL + JarUtils.checkJarFile(url) + dependencies.add(url) + } + } + } catch { + case e: Exception => + throw new SqlClientException("Could not load all required JAR files.", e) } + dependencies } - def getSessionContext(localExecutor: LocalExecutor, sessionId: String): SessionContext = { - val method = classOf[LocalExecutor].getDeclaredMethod("getSessionContext", classOf[String]) - method.setAccessible(true) - method.invoke(localExecutor, sessionId).asInstanceOf[SessionContext] + def getDefaultContext( + args: Array[String], + flinkConf: Configuration, + flinkConfDir: String): DefaultContext = { + val parser = new DefaultParser + val line = parser.parse(EMBEDDED_MODE_CLIENT_OPTIONS, args, true) + val jars: JList[URL] = Option(checkUrls(line, CliOptionsParser.OPTION_JAR)) + .getOrElse(JCollections.emptyList()) + val libDirs: JList[URL] = Option(checkUrls(line, CliOptionsParser.OPTION_LIBRARY)) + .getOrElse(JCollections.emptyList()) + val dependencies: JList[URL] = discoverDependencies(jars, libDirs) + if (FLINK_RUNTIME_VERSION === "1.16") { + val commandLines: JList[CustomCommandLine] = + Seq(new GenericCLI(flinkConf, flinkConfDir), new DefaultCLI).asJava + DynConstructors.builder() + .impl( + classOf[DefaultContext], + classOf[Configuration], + classOf[JList[CustomCommandLine]]) + .build() + .newInstance(flinkConf, commandLines) + .asInstanceOf[DefaultContext] + } else if (FLINK_RUNTIME_VERSION === "1.17") { + invokeAs[DefaultContext]( + classOf[DefaultContext], + "load", + (classOf[Configuration], flinkConf), + (classOf[JList[URL]], dependencies), + (classOf[Boolean], JBoolean.TRUE), + (classOf[Boolean], JBoolean.FALSE)) + } else { + throw new KyuubiException( + s"Flink version ${EnvironmentInformation.getVersion} is not supported currently.") + } } - def parseEmbeddedModeClient(args: Array[String]): CliOptions = + def getSessionContext(session: Session): SessionContext = getField(session, "sessionContext") + + def getResultJobId(resultFetch: ResultFetcher): Option[JobID] = { + if 
(FLINK_RUNTIME_VERSION <= "1.16") { + return None + } try { - val parser = new DefaultParser - val line = parser.parse(EMBEDDED_MODE_CLIENT_OPTIONS, args, true) - val jarUrls = checkUrls(line, OPTION_JAR) - val libraryUrls = checkUrls(line, OPTION_LIBRARY) - new CliOptions( - line.hasOption(OPTION_HELP.getOpt), - checkSessionId(line), - checkUrl(line, OPTION_INIT_FILE), - checkUrl(line, OPTION_FILE), - if (jarUrls != null && jarUrls.nonEmpty) jarUrls.asJava else null, - if (libraryUrls != null && libraryUrls.nonEmpty) libraryUrls.asJava else null, - line.getOptionValue(OPTION_UPDATE.getOpt), - line.getOptionValue(OPTION_HISTORY.getOpt), - null) + Option(getField[JobID](resultFetch, "jobID")) } catch { - case e: ParseException => - throw new SqlClientException(e.getMessage) + case _: NullPointerException => None + case e: Throwable => + throw new IllegalStateException("Unexpected error occurred while fetching query ID", e) } + } def checkSessionId(line: CommandLine): String = { val sessionId = line.getOptionValue(OPTION_SESSION.getOpt) @@ -111,13 +155,13 @@ object FlinkEngineUtils extends Logging { } else sessionId } - def checkUrl(line: CommandLine, option: Option): URL = { - val urls: List[URL] = checkUrls(line, option) + def checkUrl(line: CommandLine, option: org.apache.commons.cli.Option): URL = { + val urls: JList[URL] = checkUrls(line, option) if (urls != null && urls.nonEmpty) urls.head else null } - def checkUrls(line: CommandLine, option: Option): List[URL] = { + def checkUrls(line: CommandLine, option: org.apache.commons.cli.Option): JList[URL] = { if (line.hasOption(option.getOpt)) { line.getOptionValues(option.getOpt).distinct.map((url: String) => { checkFilePath(url) diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala index d049e3c80bf..9802f195546 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.engine.flink -import org.apache.flink.table.client.gateway.context.DefaultContext +import org.apache.flink.table.gateway.service.context.DefaultContext import org.apache.kyuubi.engine.flink.session.FlinkSQLSessionManager import org.apache.kyuubi.service.AbstractBackendService diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala index 06fdc65ae61..8838799bc24 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala @@ -18,23 +18,21 @@ package org.apache.kyuubi.engine.flink import java.io.File -import java.net.URL import java.nio.file.Paths -import java.time.Instant +import java.time.Duration import java.util.concurrent.CountDownLatch import scala.collection.JavaConverters._ -import scala.collection.mutable.ListBuffer -import org.apache.flink.client.cli.{DefaultCLI, GenericCLI} -import org.apache.flink.configuration.{Configuration, DeploymentOptions, GlobalConfiguration} -import org.apache.flink.table.client.SqlClientException -import 
org.apache.flink.table.client.gateway.context.DefaultContext -import org.apache.flink.util.JarUtils +import org.apache.flink.configuration.{Configuration, DeploymentOptions, GlobalConfiguration, PipelineOptions} +import org.apache.flink.table.api.TableEnvironment +import org.apache.flink.table.gateway.api.config.SqlGatewayServiceConfigOptions +import org.apache.flink.table.gateway.service.context.DefaultContext -import org.apache.kyuubi.{KyuubiSQLException, Logging, Utils} +import org.apache.kyuubi.{Logging, Utils} import org.apache.kyuubi.Utils.{addShutdownHook, currentUser, FLINK_ENGINE_SHUTDOWN_PRIORITY} import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_NAME, KYUUBI_SESSION_USER_KEY} import org.apache.kyuubi.engine.flink.FlinkSQLEngine.{countDownLatch, currentEngine} import org.apache.kyuubi.service.Serverable import org.apache.kyuubi.util.SignalRegister @@ -71,9 +69,12 @@ object FlinkSQLEngine extends Logging { def main(args: Array[String]): Unit = { SignalRegister.registerLogger(logger) + info(s"Flink SQL engine classpath: ${System.getProperty("java.class.path")}") + FlinkEngineUtils.checkFlinkVersion() try { + kyuubiConf.loadFileDefaults() Utils.fromCommandLineArgs(args, kyuubiConf) val flinkConfDir = sys.env.getOrElse( "FLINK_CONF_DIR", { @@ -93,51 +94,33 @@ flinkConf.addAll(Configuration.fromMap(flinkConfFromArgs.asJava)) val executionTarget = flinkConf.getString(DeploymentOptions.TARGET) - // set cluster name for per-job and application mode - executionTarget match { - case "yarn-per-job" | "yarn-application" => - if (!flinkConf.containsKey("yarn.application.name")) { - val appName = s"kyuubi_${user}_flink_${Instant.now}" - flinkConf.setString("yarn.application.name", appName) - } - case "kubernetes-application" => - if (!flinkConf.containsKey("kubernetes.cluster-id")) { - val appName = s"kyuubi-${user}-flink-${Instant.now}" - flinkConf.setString("kubernetes.cluster-id", appName) - } - case other => - debug(s"Skip generating app name for execution target $other") - } - - val cliOptions = FlinkEngineUtils.parseCliOptions(args) - val jars = if (cliOptions.getJars != null) cliOptions.getJars.asScala else List.empty - val libDirs = - if (cliOptions.getLibraryDirs != null) cliOptions.getLibraryDirs.asScala else List.empty - val dependencies = discoverDependencies(jars, libDirs) - val engineContext = new DefaultContext( - dependencies.asJava, - flinkConf, - Seq(new GenericCLI(flinkConf, flinkConfDir), new DefaultCLI).asJava) + setDeploymentConf(executionTarget, flinkConf) kyuubiConf.setIfMissing(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + val engineContext = FlinkEngineUtils.getDefaultContext(args, flinkConf, flinkConfDir) startEngine(engineContext) - info("started engine...") + info("Flink engine started") + + if ("yarn-application".equalsIgnoreCase(executionTarget)) { + bootstrapFlinkApplicationExecutor() + } // blocking main thread countDownLatch.await() } catch { case t: Throwable if currentEngine.isDefined => + error("Fatal error occurred, stopping the engine", t) currentEngine.foreach { engine => - error(t) engine.stop() } case t: Throwable => - error("Create FlinkSQL Engine Failed", t) + error("Failed to create FlinkSQL Engine", t) } } def startEngine(engineContext: DefaultContext): Unit = { + debug(s"Starting Flink SQL engine with default configuration: ${engineContext.getFlinkConfig}") currentEngine = Some(new FlinkSQLEngine(engineContext)) currentEngine.foreach { 
engine => engine.initialize(kyuubiConf) @@ -146,36 +129,39 @@ object FlinkSQLEngine extends Logging { } } - private def discoverDependencies( - jars: Seq[URL], - libraries: Seq[URL]): List[URL] = { - try { - var dependencies: ListBuffer[URL] = ListBuffer() - // find jar files - jars.foreach { url => - JarUtils.checkJarFile(url) - dependencies = dependencies += url - } - // find jar files in library directories - libraries.foreach { libUrl => - val dir: File = new File(libUrl.toURI) - if (!dir.isDirectory) throw new SqlClientException("Directory expected: " + dir) - else if (!dir.canRead) throw new SqlClientException("Directory cannot be read: " + dir) - val files: Array[File] = dir.listFiles - if (files == null) throw new SqlClientException("Directory cannot be read: " + dir) - files.foreach { f => - // only consider jars - if (f.isFile && f.getAbsolutePath.toLowerCase.endsWith(".jar")) { - val url: URL = f.toURI.toURL - JarUtils.checkJarFile(url) - dependencies = dependencies += url - } + private def bootstrapFlinkApplicationExecutor() = { + // trigger an execution to initiate EmbeddedExecutor with the default flink conf + val flinkConf = new Configuration() + flinkConf.set(PipelineOptions.NAME, "kyuubi-bootstrap-sql") + debug(s"Running bootstrap Flink SQL in application mode with flink conf: $flinkConf.") + val tableEnv = TableEnvironment.create(flinkConf) + val res = tableEnv.executeSql("select 'kyuubi'") + res.await() + info("Bootstrap Flink SQL finished.") + } + + private def setDeploymentConf(executionTarget: String, flinkConf: Configuration): Unit = { + // forward kyuubi engine variables to flink configuration + kyuubiConf.getOption("flink.app.name") + .foreach(flinkConf.setString(KYUUBI_ENGINE_NAME, _)) + + kyuubiConf.getOption(KYUUBI_SESSION_USER_KEY) + .foreach(flinkConf.setString(KYUUBI_SESSION_USER_KEY, _)) + + // force disable Flink's session timeout + flinkConf.set( + SqlGatewayServiceConfigOptions.SQL_GATEWAY_SESSION_IDLE_TIMEOUT, + Duration.ofMillis(0)) + + executionTarget match { + case "yarn-per-job" | "yarn-application" => + if (flinkConf.containsKey("high-availability.cluster-id")) { + flinkConf.setString( + "yarn.application.id", + flinkConf.toMap.get("high-availability.cluster-id")) } - } - dependencies.toList - } catch { - case e: Exception => - throw KyuubiSQLException(s"Could not load all required JAR files.", e) + case other => + debug(s"Skip setting deployment conf for execution target $other") } } } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala index 93d013556e1..0e0c476e2d4 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala @@ -17,39 +17,25 @@ package org.apache.kyuubi.engine.flink.operation -import java.time.LocalDate -import java.util - -import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer +import scala.concurrent.duration.Duration import org.apache.flink.api.common.JobID -import org.apache.flink.table.api.{ResultKind, TableResult} -import org.apache.flink.table.client.gateway.TypedResult -import org.apache.flink.table.data.{GenericArrayData, GenericMapData, RowData} -import org.apache.flink.table.data.binary.{BinaryArrayData, BinaryMapData} 
-import org.apache.flink.table.operations.{Operation, QueryOperation} -import org.apache.flink.table.operations.command._ -import org.apache.flink.table.types.DataType -import org.apache.flink.table.types.logical._ -import org.apache.flink.types.Row +import org.apache.flink.table.gateway.api.operation.OperationHandle import org.apache.kyuubi.Logging -import org.apache.kyuubi.engine.flink.FlinkEngineUtils._ -import org.apache.kyuubi.engine.flink.result.ResultSet -import org.apache.kyuubi.engine.flink.schema.RowSet.toHiveString +import org.apache.kyuubi.engine.flink.FlinkEngineUtils +import org.apache.kyuubi.engine.flink.result.ResultSetUtil import org.apache.kyuubi.operation.OperationState import org.apache.kyuubi.operation.log.OperationLog -import org.apache.kyuubi.reflection.DynMethods import org.apache.kyuubi.session.Session -import org.apache.kyuubi.util.RowSetUtils class ExecuteStatement( session: Session, override val statement: String, override val shouldRunAsync: Boolean, queryTimeout: Long, - resultMaxRows: Int) + resultMaxRows: Int, + resultFetchTimeout: Duration) extends FlinkOperation(session) with Logging { private val operationLog: OperationLog = @@ -65,10 +51,6 @@ class ExecuteStatement( setHasResultSet(true) } - override protected def afterRun(): Unit = { - OperationLog.removeCurrentOperationLog() - } - override protected def runInternal(): Unit = { addTimeoutMonitor(queryTimeout) executeStatement() @@ -77,21 +59,11 @@ class ExecuteStatement( private def executeStatement(): Unit = { try { setState(OperationState.RUNNING) - val operation = executor.parseStatement(sessionId, statement) - operation match { - case queryOperation: QueryOperation => runQueryOperation(queryOperation) - case setOperation: SetOperation => - resultSet = OperationUtils.runSetOperation(setOperation, executor, sessionId) - case resetOperation: ResetOperation => - resultSet = OperationUtils.runResetOperation(resetOperation, executor, sessionId) - case addJarOperation: AddJarOperation if isFlinkVersionAtMost("1.15") => - resultSet = OperationUtils.runAddJarOperation(addJarOperation, executor, sessionId) - case removeJarOperation: RemoveJarOperation => - resultSet = OperationUtils.runRemoveJarOperation(removeJarOperation, executor, sessionId) - case showJarsOperation: ShowJarsOperation if isFlinkVersionAtMost("1.15") => - resultSet = OperationUtils.runShowJarOperation(showJarsOperation, executor, sessionId) - case operation: Operation => runOperation(operation) - } + val resultFetcher = executor.executeStatement( + new OperationHandle(getHandle.identifier), + statement) + jobId = FlinkEngineUtils.getResultJobId(resultFetcher) + resultSet = ResultSetUtil.fromResultFetcher(resultFetcher, resultMaxRows, resultFetchTimeout) setState(OperationState.FINISHED) } catch { onError(cancel = true) @@ -99,168 +71,4 @@ class ExecuteStatement( shutdownTimeoutMonitor() } } - - private def runQueryOperation(operation: QueryOperation): Unit = { - var resultId: String = null - try { - val resultDescriptor = executor.executeQuery(sessionId, operation) - val dataTypes = resultDescriptor.getResultSchema.getColumnDataTypes.asScala.toList - - resultId = resultDescriptor.getResultId - - val rows = new ArrayBuffer[Row]() - var loop = true - - while (loop) { - Thread.sleep(50) // slow the processing down - - val pageSize = Math.min(500, resultMaxRows) - val result = executor.snapshotResult(sessionId, resultId, pageSize) - result.getType match { - case TypedResult.ResultType.PAYLOAD => - (1 to result.getPayload).foreach { page => - if 
(rows.size < resultMaxRows) { - // FLINK-24461 retrieveResultPage method changes the return type from Row to RowData - val retrieveResultPage = DynMethods.builder("retrieveResultPage") - .impl(executor.getClass, classOf[String], classOf[Int]) - .build(executor) - val _page = Integer.valueOf(page) - if (isFlinkVersionEqualTo("1.14")) { - val result = retrieveResultPage.invoke[util.List[Row]](resultId, _page) - rows ++= result.asScala - } else if (isFlinkVersionAtLeast("1.15")) { - val result = retrieveResultPage.invoke[util.List[RowData]](resultId, _page) - rows ++= result.asScala.map(r => convertToRow(r, dataTypes)) - } - } else { - loop = false - } - } - case TypedResult.ResultType.EOS => loop = false - case TypedResult.ResultType.EMPTY => - } - } - - resultSet = ResultSet.builder - .resultKind(ResultKind.SUCCESS_WITH_CONTENT) - .columns(resultDescriptor.getResultSchema.getColumns) - .data(rows.slice(0, resultMaxRows).toArray[Row]) - .build - } finally { - if (resultId != null) { - cleanupQueryResult(resultId) - } - } - } - - private def runOperation(operation: Operation): Unit = { - // FLINK-24461 executeOperation method changes the return type - // from TableResult to TableResultInternal - val executeOperation = DynMethods.builder("executeOperation") - .impl(executor.getClass, classOf[String], classOf[Operation]) - .build(executor) - val result = executeOperation.invoke[TableResult](sessionId, operation) - jobId = result.getJobClient.asScala.map(_.getJobID) - result.await() - resultSet = ResultSet.fromTableResult(result) - } - - private def cleanupQueryResult(resultId: String): Unit = { - try { - executor.cancelQuery(sessionId, resultId) - } catch { - case t: Throwable => - warn(s"Failed to clean result set $resultId in session $sessionId", t) - } - } - - private[this] def convertToRow(r: RowData, dataTypes: List[DataType]): Row = { - val row = Row.withPositions(r.getRowKind, r.getArity) - for (i <- 0 until r.getArity) { - val dataType = dataTypes(i) - dataType.getLogicalType match { - case arrayType: ArrayType => - val arrayData = r.getArray(i) - if (arrayData == null) { - row.setField(i, null) - } - arrayData match { - case d: GenericArrayData => - row.setField(i, d.toObjectArray) - case d: BinaryArrayData => - row.setField(i, d.toObjectArray(arrayType.getElementType)) - case _ => - } - case _: BinaryType => - row.setField(i, r.getBinary(i)) - case _: BigIntType => - row.setField(i, r.getLong(i)) - case _: BooleanType => - row.setField(i, r.getBoolean(i)) - case _: VarCharType | _: CharType => - row.setField(i, r.getString(i)) - case t: DecimalType => - row.setField(i, r.getDecimal(i, t.getPrecision, t.getScale).toBigDecimal) - case _: DateType => - val date = RowSetUtils.formatLocalDate(LocalDate.ofEpochDay(r.getInt(i))) - row.setField(i, date) - case t: TimestampType => - val ts = RowSetUtils - .formatLocalDateTime(r.getTimestamp(i, t.getPrecision) - .toLocalDateTime) - row.setField(i, ts) - case _: TinyIntType => - row.setField(i, r.getByte(i)) - case _: SmallIntType => - row.setField(i, r.getShort(i)) - case _: IntType => - row.setField(i, r.getInt(i)) - case _: FloatType => - row.setField(i, r.getFloat(i)) - case mapType: MapType => - val mapData = r.getMap(i) - if (mapData != null && mapData.size > 0) { - val keyType = mapType.getKeyType - val valueType = mapType.getValueType - mapData match { - case d: BinaryMapData => - val kvArray = toArray(keyType, valueType, d) - val map: util.Map[Any, Any] = new util.HashMap[Any, Any] - for (i <- kvArray._1.indices) { - val value: Any = 
kvArray._2(i) - map.put(kvArray._1(i), value) - } - row.setField(i, map) - case d: GenericMapData => // TODO - } - } else { - row.setField(i, null) - } - case _: DoubleType => - row.setField(i, r.getDouble(i)) - case t: RowType => - val fieldDataTypes = DynMethods.builder("getFieldDataTypes") - .impl(classOf[DataType], classOf[DataType]) - .buildStatic - .invoke[util.List[DataType]](dataType) - .asScala.toList - val internalRowData = r.getRow(i, t.getFieldCount) - val internalRow = convertToRow(internalRowData, fieldDataTypes) - row.setField(i, internalRow) - case t => - val hiveString = toHiveString((row.getField(i), t)) - row.setField(i, hiveString) - } - } - row - } - - private[this] def toArray( - keyType: LogicalType, - valueType: LogicalType, - arrayData: BinaryMapData): (Array[_], Array[_]) = { - - arrayData.keyArray().toObjectArray(keyType) -> arrayData.valueArray().toObjectArray(valueType) - } - } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala index 2859d659e62..1424b721c4b 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala @@ -18,12 +18,17 @@ package org.apache.kyuubi.engine.flink.operation import java.io.IOException +import java.time.ZoneId +import java.util.concurrent.TimeoutException import scala.collection.JavaConverters.collectionAsScalaIterableConverter +import scala.collection.mutable.ListBuffer -import org.apache.flink.table.client.gateway.Executor -import org.apache.flink.table.client.gateway.context.SessionContext -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet, TTableSchema} +import org.apache.flink.configuration.Configuration +import org.apache.flink.table.gateway.service.context.SessionContext +import org.apache.flink.table.gateway.service.operation.OperationExecutor +import org.apache.flink.types.Row +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp, TTableSchema} import org.apache.kyuubi.{KyuubiSQLException, Utils} import org.apache.kyuubi.engine.flink.result.ResultSet @@ -36,12 +41,16 @@ import org.apache.kyuubi.session.Session abstract class FlinkOperation(session: Session) extends AbstractOperation(session) { + protected val flinkSession: org.apache.flink.table.gateway.service.session.Session = + session.asInstanceOf[FlinkSessionImpl].fSession + + protected val executor: OperationExecutor = flinkSession.createExecutor( + Configuration.fromMap(flinkSession.getSessionConfig)) + protected val sessionContext: SessionContext = { session.asInstanceOf[FlinkSessionImpl].sessionContext } - protected val executor: Executor = session.asInstanceOf[FlinkSessionImpl].executor - protected val sessionId: String = session.handle.identifier.toString protected var resultSet: ResultSet = _ @@ -52,7 +61,7 @@ abstract class FlinkOperation(session: Session) extends AbstractOperation(sessio } override protected def afterRun(): Unit = { - state.synchronized { + withLockRequired { if (!isTerminalState(state)) { setState(OperationState.FINISHED) } @@ -66,6 +75,10 @@ abstract class FlinkOperation(session: Session) extends AbstractOperation(sessio override def close(): Unit = { cleanup(OperationState.CLOSED) + // the result set may be null if the 
operation ends exceptionally + if (resultSet != null) { + resultSet.close + } try { getOperationLog.foreach(_.close()) } catch { @@ -85,22 +98,50 @@ abstract class FlinkOperation(session: Session) extends AbstractOperation(sessio resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) order match { - case FETCH_NEXT => resultSet.getData.fetchNext() case FETCH_PRIOR => resultSet.getData.fetchPrior(rowSetSize); case FETCH_FIRST => resultSet.getData.fetchAbsolute(0); + case FETCH_NEXT => // ignored because new data are fetched lazily + } + val batch = new ListBuffer[Row] + try { + // there could be null values at the end of the batch + // because Flink could return an EOS + var rows = 0 + while (resultSet.getData.hasNext && rows < rowSetSize) { + Option(resultSet.getData.next()).foreach { r => batch += r; rows += 1 } + } + } catch { + case e: TimeoutException => + // ignore and return the current batch if there's some data + // otherwise, rethrow the timeout exception + if (batch.nonEmpty) { + debug(s"Timeout fetching more data for $opType operation. " + + s"Returning the current fetched data.") + } else { + throw e + } + } + val timeZone = Option(flinkSession.getSessionConfig.get("table.local-time-zone")) + val zoneId = timeZone match { + case Some(tz) => ZoneId.of(tz) + case None => ZoneId.systemDefault() } - val token = resultSet.getData.take(rowSetSize) val resultRowSet = RowSet.resultSetToTRowSet( - token.toList, + batch.toList, resultSet, + zoneId, getProtocolVersion) - resultRowSet.setStartRowOffset(resultSet.getData.getPosition) - resultRowSet + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(resultRowSet) + resp.setHasMoreRows(resultSet.getData.hasNext) + resp } override def shouldRunAsync: Boolean = false @@ -109,7 +150,7 @@ abstract class FlinkOperation(session: Session) extends AbstractOperation(sessio // We should use Throwable instead of Exception since `java.lang.NoClassDefFoundError` // could be thrown. 
case e: Throwable => - state.synchronized { + withLockRequired { val errMsg = Utils.stringifyException(e) if (state == OperationState.TIMEOUT) { val ke = KyuubiSQLException(s"Timeout operating $opType: $errMsg") diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala index d7b5e297d1a..d5c0629eedd 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala @@ -20,9 +20,12 @@ package org.apache.kyuubi.engine.flink.operation import java.util import scala.collection.JavaConverters._ +import scala.concurrent.duration.{Duration, DurationLong} +import scala.language.postfixOps import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.engine.flink.FlinkEngineUtils import org.apache.kyuubi.engine.flink.result.Constants import org.apache.kyuubi.engine.flink.session.FlinkSessionImpl import org.apache.kyuubi.operation.{NoneMode, Operation, OperationManager, PlanOnlyMode} @@ -44,7 +47,8 @@ class FlinkSQLOperationManager extends OperationManager("FlinkSQLOperationManage runAsync: Boolean, queryTimeout: Long): Operation = { val flinkSession = session.asInstanceOf[FlinkSessionImpl] - if (flinkSession.sessionContext.getConfigMap.getOrDefault( + val sessionConfig = flinkSession.fSession.getSessionConfig + if (sessionConfig.getOrDefault( ENGINE_OPERATION_CONVERT_CATALOG_DATABASE_ENABLED.key, operationConvertCatalogDatabaseDefault.toString).toBoolean) { val catalogDatabaseOperation = processCatalogDatabase(session, statement, confOverlay) @@ -53,23 +57,42 @@ class FlinkSQLOperationManager extends OperationManager("FlinkSQLOperationManage } } - val mode = PlanOnlyMode.fromString(flinkSession.sessionContext.getConfigMap.getOrDefault( - OPERATION_PLAN_ONLY_MODE.key, - operationModeDefault)) + val mode = PlanOnlyMode.fromString( + sessionConfig.getOrDefault( + OPERATION_PLAN_ONLY_MODE.key, + operationModeDefault)) - flinkSession.sessionContext.set(OPERATION_PLAN_ONLY_MODE.key, mode.name) + val sessionContext = FlinkEngineUtils.getSessionContext(flinkSession.fSession) + sessionContext.set(OPERATION_PLAN_ONLY_MODE.key, mode.name) val resultMaxRows = flinkSession.normalizedConf.getOrElse( ENGINE_FLINK_MAX_ROWS.key, resultMaxRowsDefault.toString).toInt + + val resultFetchTimeout = + flinkSession.normalizedConf.get(ENGINE_FLINK_FETCH_TIMEOUT.key).map(_.toLong milliseconds) + .getOrElse(Duration.Inf) + val op = mode match { case NoneMode => // FLINK-24427 seals calcite classes which required to access in async mode, considering // there is no much benefit in async mode, here we just ignore `runAsync` and always run // statement in sync mode as a workaround - new ExecuteStatement(session, statement, false, queryTimeout, resultMaxRows) + new ExecuteStatement( + session, + statement, + false, + queryTimeout, + resultMaxRows, + resultFetchTimeout) case mode => - new PlanOnlyStatement(session, statement, mode) + new PlanOnlyStatement( + session, + statement, + mode, + queryTimeout, + resultMaxRows, + resultFetchTimeout) } addOperation(op) } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala 
b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala index 11dd760e4ec..2453716812d 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.engine.flink.operation +import scala.collection.convert.ImplicitConversions._ + import org.apache.kyuubi.engine.flink.result.ResultSetUtil import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT import org.apache.kyuubi.session.Session @@ -25,8 +27,8 @@ class GetCatalogs(session: Session) extends FlinkOperation(session) { override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - val catalogs = tableEnv.listCatalogs.toList + val catalogManager = sessionContext.getSessionState.catalogManager + val catalogs = catalogManager.listCatalogs.toList resultSet = ResultSetUtil.stringListToResultSet(catalogs, TABLE_CAT) } catch onError() } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala index 6ce2a6ac7e7..b1a7c0c3ee5 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala @@ -21,7 +21,7 @@ import scala.collection.JavaConverters._ import org.apache.commons.lang3.StringUtils import org.apache.flink.table.api.{DataTypes, ResultKind} -import org.apache.flink.table.catalog.Column +import org.apache.flink.table.catalog.{Column, ObjectIdentifier} import org.apache.flink.table.types.logical._ import org.apache.flink.types.Row @@ -40,17 +40,17 @@ class GetColumns( override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment val catalogName = - if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog + if (StringUtils.isEmpty(catalogNameOrEmpty)) executor.getCurrentCatalog else catalogNameOrEmpty val schemaNameRegex = toJavaRegex(schemaNamePattern) val tableNameRegex = toJavaRegex(tableNamePattern) val columnNameRegex = toJavaRegex(columnNamePattern).r - val columns = tableEnv.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog => + val catalogManager = sessionContext.getSessionState.catalogManager + val columns = catalogManager.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog => SchemaHelper.getSchemasWithPattern(flinkCatalog, schemaNameRegex) .flatMap { schemaName => SchemaHelper.getFlinkTablesWithPattern( @@ -60,7 +60,8 @@ class GetColumns( tableNameRegex) .filter { _._2.isDefined } .flatMap { case (tableName, _) => - val flinkTable = tableEnv.from(s"`$catalogName`.`$schemaName`.`$tableName`") + val flinkTable = catalogManager.getTable( + ObjectIdentifier.of(catalogName, schemaName, tableName)).get() val resolvedSchema = flinkTable.getResolvedSchema resolvedSchema.getColumns.asScala.toArray.zipWithIndex .filter { case (column, _) => diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala 
b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala index 988072e8da4..5f82de4a689 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala @@ -18,15 +18,20 @@ package org.apache.kyuubi.engine.flink.operation import org.apache.kyuubi.engine.flink.result.ResultSetUtil +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT import org.apache.kyuubi.session.Session class GetCurrentCatalog(session: Session) extends FlinkOperation(session) { + private val operationLog: OperationLog = + OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - val catalog = tableEnv.getCurrentCatalog + val catalog = executor.getCurrentCatalog resultSet = ResultSetUtil.stringListToResultSet(List(catalog), TABLE_CAT) } catch onError() } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala index 8315a18d3d8..107609c0639 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala @@ -18,15 +18,20 @@ package org.apache.kyuubi.engine.flink.operation import org.apache.kyuubi.engine.flink.result.ResultSetUtil +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_SCHEM import org.apache.kyuubi.session.Session class GetCurrentDatabase(session: Session) extends FlinkOperation(session) { + private val operationLog: OperationLog = + OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - val database = tableEnv.getCurrentDatabase + val database = sessionContext.getSessionState.catalogManager.getCurrentDatabase resultSet = ResultSetUtil.stringListToResultSet(List(database), TABLE_SCHEM) } catch onError() } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala index ab870ab7931..85f34a29a05 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala @@ -20,9 +20,10 @@ package org.apache.kyuubi.engine.flink.operation import java.sql.DatabaseMetaData import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import org.apache.commons.lang3.StringUtils -import org.apache.flink.table.api.{DataTypes, ResultKind, TableEnvironment} +import org.apache.flink.table.api.{DataTypes, ResultKind} 
import org.apache.flink.table.catalog.Column import org.apache.flink.types.Row @@ -42,17 +43,20 @@ class GetFunctions( try { val schemaPattern = toJavaRegex(schemaName) val functionPattern = toJavaRegex(functionName) - val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment + val functionCatalog = sessionContext.getSessionState.functionCatalog + val catalogManager = sessionContext.getSessionState.catalogManager + val systemFunctions = filterPattern( - tableEnv.listFunctions().diff(tableEnv.listUserDefinedFunctions()), + functionCatalog.getFunctions + .diff(functionCatalog.getUserDefinedFunctions), functionPattern) .map { f => Row.of(null, null, f, null, Integer.valueOf(DatabaseMetaData.functionResultUnknown), null) - } - val catalogFunctions = tableEnv.listCatalogs() + }.toArray + val catalogFunctions = catalogManager.listCatalogs() .filter { c => StringUtils.isEmpty(catalogName) || c == catalogName } .flatMap { c => - val catalog = tableEnv.getCatalog(c).get() + val catalog = catalogManager.getCatalog(c).get() filterPattern(catalog.listDatabases().asScala, schemaPattern) .flatMap { d => filterPattern(catalog.listFunctions(d).asScala, functionPattern) @@ -66,7 +70,7 @@ class GetFunctions( null) } } - } + }.toArray resultSet = ResultSet.builder.resultKind(ResultKind.SUCCESS_WITH_CONTENT) .columns( Column.physical(FUNCTION_CAT, DataTypes.STRING()), diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala index b534feb1fd9..5b9060cf184 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala @@ -21,8 +21,9 @@ import scala.collection.JavaConverters._ import org.apache.commons.lang3.StringUtils import org.apache.flink.table.api.{DataTypes, ResultKind} -import org.apache.flink.table.catalog.Column +import org.apache.flink.table.catalog.{Column, ObjectIdentifier} import org.apache.flink.types.Row +import org.apache.flink.util.FlinkException import org.apache.kyuubi.engine.flink.result.ResultSet import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ @@ -37,22 +38,25 @@ class GetPrimaryKeys( override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment + val catalogManager = sessionContext.getSessionState.catalogManager val catalogName = - if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog + if (StringUtils.isEmpty(catalogNameOrEmpty)) catalogManager.getCurrentCatalog else catalogNameOrEmpty val schemaName = if (StringUtils.isEmpty(schemaNameOrEmpty)) { - if (catalogName != tableEnv.getCurrentCatalog) { - tableEnv.getCatalog(catalogName).get().getDefaultDatabase + if (catalogName != executor.getCurrentCatalog) { + catalogManager.getCatalog(catalogName).get().getDefaultDatabase } else { - tableEnv.getCurrentDatabase + catalogManager.getCurrentDatabase } } else schemaNameOrEmpty - val flinkTable = tableEnv.from(s"`$catalogName`.`$schemaName`.`$tableName`") + val flinkTable = catalogManager + .getTable(ObjectIdentifier.of(catalogName, schemaName, tableName)) + .orElseThrow(() => + new FlinkException(s"Table `$catalogName`.`$schemaName`.`$tableName`` not found.")) val resolvedSchema = flinkTable.getResolvedSchema val 
primaryKeySchema = resolvedSchema.getPrimaryKey @@ -102,5 +106,4 @@ class GetPrimaryKeys( ) // format: on } - } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala index 6715b232073..f56ddd8b18e 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala @@ -18,9 +18,10 @@ package org.apache.kyuubi.engine.flink.operation import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import org.apache.commons.lang3.StringUtils -import org.apache.flink.table.api.{DataTypes, ResultKind, TableEnvironment} +import org.apache.flink.table.api.{DataTypes, ResultKind} import org.apache.flink.table.catalog.Column import org.apache.flink.types.Row @@ -35,14 +36,14 @@ class GetSchemas(session: Session, catalogName: String, schema: String) override protected def runInternal(): Unit = { try { val schemaPattern = toJavaRegex(schema) - val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment - val schemas = tableEnv.listCatalogs() + val catalogManager = sessionContext.getSessionState.catalogManager + val schemas = catalogManager.listCatalogs() .filter { c => StringUtils.isEmpty(catalogName) || c == catalogName } .flatMap { c => - val catalog = tableEnv.getCatalog(c).get() + val catalog = catalogManager.getCatalog(c).get() filterPattern(catalog.listDatabases().asScala, schemaPattern) .map { d => Row.of(d, c) } - } + }.toArray resultSet = ResultSet.builder.resultKind(ResultKind.SUCCESS_WITH_CONTENT) .columns( Column.physical(TABLE_SCHEM, DataTypes.STRING()), diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala index a4e55715a5a..325a501671e 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala @@ -37,16 +37,16 @@ class GetTables( override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment + val catalogManager = sessionContext.getSessionState.catalogManager val catalogName = - if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog + if (StringUtils.isEmpty(catalogNameOrEmpty)) catalogManager.getCurrentCatalog else catalogNameOrEmpty val schemaNameRegex = toJavaRegex(schemaNamePattern) val tableNameRegex = toJavaRegex(tableNamePattern) - val tables = tableEnv.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog => + val tables = catalogManager.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog => SchemaHelper.getSchemasWithPattern(flinkCatalog, schemaNameRegex) .flatMap { schemaName => SchemaHelper.getFlinkTablesWithPattern( diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala deleted file mode 100644 index 7d624948c18..00000000000 --- 
a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.engine.flink.operation - -import java.util - -import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer - -import org.apache.flink.table.api.{DataTypes, ResultKind} -import org.apache.flink.table.catalog.Column -import org.apache.flink.table.client.gateway.Executor -import org.apache.flink.table.operations.command._ -import org.apache.flink.types.Row - -import org.apache.kyuubi.engine.flink.result.{ResultSet, ResultSetUtil} -import org.apache.kyuubi.engine.flink.result.ResultSetUtil.successResultSet -import org.apache.kyuubi.reflection.DynMethods - -object OperationUtils { - - /** - * Runs a SetOperation with executor. Returns when SetOperation is executed successfully. - * - * @param setOperation Set operation. - * @param executor A gateway for communicating with Flink and other external systems. - * @param sessionId Id of the session. - * @return A ResultSet of SetOperation execution. - */ - def runSetOperation( - setOperation: SetOperation, - executor: Executor, - sessionId: String): ResultSet = { - if (setOperation.getKey.isPresent) { - val key: String = setOperation.getKey.get.trim - - if (setOperation.getValue.isPresent) { - val newValue: String = setOperation.getValue.get.trim - executor.setSessionProperty(sessionId, key, newValue) - } - - val value = executor.getSessionConfigMap(sessionId).getOrDefault(key, "") - ResultSet.builder - .resultKind(ResultKind.SUCCESS_WITH_CONTENT) - .columns( - Column.physical("key", DataTypes.STRING()), - Column.physical("value", DataTypes.STRING())) - .data(Array(Row.of(key, value))) - .build - } else { - // show all properties if set without key - val properties: util.Map[String, String] = executor.getSessionConfigMap(sessionId) - - val entries = ArrayBuffer.empty[Row] - properties.forEach((key, value) => entries.append(Row.of(key, value))) - - if (entries.nonEmpty) { - val prettyEntries = entries.sortBy(_.getField(0).asInstanceOf[String]) - ResultSet.builder - .resultKind(ResultKind.SUCCESS_WITH_CONTENT) - .columns( - Column.physical("key", DataTypes.STRING()), - Column.physical("value", DataTypes.STRING())) - .data(prettyEntries.toArray) - .build - } else { - ResultSet.builder - .resultKind(ResultKind.SUCCESS_WITH_CONTENT) - .columns( - Column.physical("key", DataTypes.STRING()), - Column.physical("value", DataTypes.STRING())) - .data(Array[Row]()) - .build - } - } - } - - /** - * Runs a ResetOperation with executor. Returns when ResetOperation is executed successfully. - * - * @param resetOperation Reset operation. 
- * @param executor A gateway for communicating with Flink and other external systems. - * @param sessionId Id of the session. - * @return A ResultSet of ResetOperation execution. - */ - def runResetOperation( - resetOperation: ResetOperation, - executor: Executor, - sessionId: String): ResultSet = { - if (resetOperation.getKey.isPresent) { - // reset the given property - executor.resetSessionProperty(sessionId, resetOperation.getKey.get()) - } else { - // reset all properties - executor.resetSessionProperties(sessionId) - } - successResultSet - } - - /** - * Runs a AddJarOperation with the executor. Currently only jars on local filesystem - * are supported. - * - * @param addJarOperation Add-jar operation. - * @param executor A gateway for communicating with Flink and other external systems. - * @param sessionId Id of the session. - * @return A ResultSet of AddJarOperation execution. - */ - def runAddJarOperation( - addJarOperation: AddJarOperation, - executor: Executor, - sessionId: String): ResultSet = { - // Removed by FLINK-27790 - val addJar = DynMethods.builder("addJar") - .impl(executor.getClass, classOf[String], classOf[String]) - .build(executor) - addJar.invoke[Void](sessionId, addJarOperation.getPath) - successResultSet - } - - /** - * Runs a RemoveJarOperation with the executor. Only jars added by AddJarOperation could - * be removed. - * - * @param removeJarOperation Remove-jar operation. - * @param executor A gateway for communicating with Flink and other external systems. - * @param sessionId Id of the session. - * @return A ResultSet of RemoveJarOperation execution. - */ - def runRemoveJarOperation( - removeJarOperation: RemoveJarOperation, - executor: Executor, - sessionId: String): ResultSet = { - executor.removeJar(sessionId, removeJarOperation.getPath) - successResultSet - } - - /** - * Runs a ShowJarsOperation with the executor. Returns the jars of the current session. - * - * @param showJarsOperation Show-jar operation. - * @param executor A gateway for communicating with Flink and other external systems. - * @param sessionId Id of the session. - * @return A ResultSet of ShowJarsOperation execution. 
- */ - def runShowJarOperation( - showJarsOperation: ShowJarsOperation, - executor: Executor, - sessionId: String): ResultSet = { - // Removed by FLINK-27790 - val listJars = DynMethods.builder("listJars") - .impl(executor.getClass, classOf[String]) - .build(executor) - val jars = listJars.invoke[util.List[String]](sessionId) - ResultSetUtil.stringListToResultSet(jars.asScala.toList, "jar") - } -} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala index afe04a30736..1284bfd73e6 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala @@ -17,10 +17,13 @@ package org.apache.kyuubi.engine.flink.operation +import scala.concurrent.duration.Duration + +import com.google.common.base.Preconditions import org.apache.flink.table.api.TableEnvironment +import org.apache.flink.table.gateway.api.operation.OperationHandle import org.apache.flink.table.operations.command._ -import org.apache.kyuubi.engine.flink.FlinkEngineUtils.isFlinkVersionAtMost import org.apache.kyuubi.engine.flink.result.ResultSetUtil import org.apache.kyuubi.operation.{ExecutionMode, ParseMode, PhysicalMode, PlanOnlyMode, UnknownMode} import org.apache.kyuubi.operation.PlanOnlyMode.{notSupportedModeError, unknownModeError} @@ -33,7 +36,10 @@ import org.apache.kyuubi.session.Session class PlanOnlyStatement( session: Session, override val statement: String, - mode: PlanOnlyMode) extends FlinkOperation(session) { + mode: PlanOnlyMode, + queryTimeout: Long, + resultMaxRows: Int, + resultFetchTimeout: Duration) extends FlinkOperation(session) { private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) private val lineSeparator: String = System.lineSeparator() @@ -45,19 +51,22 @@ class PlanOnlyStatement( } override protected def runInternal(): Unit = { + addTimeoutMonitor(queryTimeout) try { - val operation = executor.parseStatement(sessionId, statement) + val operations = executor.getTableEnvironment.getParser.parse(statement) + Preconditions.checkArgument( + operations.size() == 1, + "Plan-only mode supports single statement only", + null) + val operation = operations.get(0) operation match { - case setOperation: SetOperation => - resultSet = OperationUtils.runSetOperation(setOperation, executor, sessionId) - case resetOperation: ResetOperation => - resultSet = OperationUtils.runResetOperation(resetOperation, executor, sessionId) - case addJarOperation: AddJarOperation if isFlinkVersionAtMost("1.15") => - resultSet = OperationUtils.runAddJarOperation(addJarOperation, executor, sessionId) - case removeJarOperation: RemoveJarOperation => - resultSet = OperationUtils.runRemoveJarOperation(removeJarOperation, executor, sessionId) - case showJarsOperation: ShowJarsOperation if isFlinkVersionAtMost("1.15") => - resultSet = OperationUtils.runShowJarOperation(showJarsOperation, executor, sessionId) + case _: SetOperation | _: ResetOperation | _: AddJarOperation | _: RemoveJarOperation | + _: ShowJarsOperation => + val resultFetcher = executor.executeStatement( + new OperationHandle(getHandle.identifier), + statement) + resultSet = + ResultSetUtil.fromResultFetcher(resultFetcher, resultMaxRows, resultFetchTimeout); case _ => 
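// non-command statements are not executed in plan-only mode; explainOperation below renders the plan requested by the mode (parse / physical / execution)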
explainOperation(statement) } } catch { @@ -66,7 +75,7 @@ class PlanOnlyStatement( } private def explainOperation(statement: String): Unit = { - val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment + val tableEnv: TableEnvironment = executor.getTableEnvironment val explainPlans = tableEnv.explainSql(statement).split(s"$lineSeparator$lineSeparator") val operationPlan = mode match { diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala index 489cc638458..f279ccda616 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala @@ -17,15 +17,21 @@ package org.apache.kyuubi.engine.flink.operation +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentCatalog(session: Session, catalog: String) extends FlinkOperation(session) { + private val operationLog: OperationLog = + OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - tableEnv.useCatalog(catalog) + val catalogManager = sessionContext.getSessionState.catalogManager + catalogManager.setCurrentCatalog(catalog) setHasResultSet(false) } catch onError() } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala index 0d3598405d8..70535e8344f 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala @@ -17,15 +17,21 @@ package org.apache.kyuubi.engine.flink.operation +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentDatabase(session: Session, database: String) extends FlinkOperation(session) { + private val operationLog: OperationLog = + OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - tableEnv.useDatabase(database) + val catalogManager = sessionContext.getSessionState.catalogManager + catalogManager.setCurrentDatabase(database) setHasResultSet(false) } catch onError() } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/Constants.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/Constants.scala new file mode 100644 index 00000000000..ca582b2e3f3 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/Constants.scala @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink.result + +object Constants { + val TABLE_TYPE: String = "TABLE" + val VIEW_TYPE: String = "VIEW" + val SUPPORTED_TABLE_TYPES: Array[String] = Array[String](TABLE_TYPE, VIEW_TYPE) +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/QueryResultFetchIterator.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/QueryResultFetchIterator.scala new file mode 100644 index 00000000000..60ae08d9dd8 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/QueryResultFetchIterator.scala @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink.result + +import java.util +import java.util.concurrent.Executors + +import scala.collection.convert.ImplicitConversions._ +import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future} +import scala.concurrent.duration.Duration + +import com.google.common.util.concurrent.ThreadFactoryBuilder +import org.apache.flink.table.api.DataTypes +import org.apache.flink.table.catalog.ResolvedSchema +import org.apache.flink.table.data.RowData +import org.apache.flink.table.data.conversion.DataStructureConverters +import org.apache.flink.table.gateway.service.result.ResultFetcher +import org.apache.flink.table.types.DataType +import org.apache.flink.types.Row + +import org.apache.kyuubi.Logging +import org.apache.kyuubi.engine.flink.shim.FlinkResultSet +import org.apache.kyuubi.operation.FetchIterator + +class QueryResultFetchIterator( + resultFetcher: ResultFetcher, + maxRows: Int = 1000000, + resultFetchTimeout: Duration = Duration.Inf) extends FetchIterator[Row] with Logging { + + val schema: ResolvedSchema = resultFetcher.getResultSchema + + val dataTypes: util.List[DataType] = schema.getColumnDataTypes + + var token: Long = 0 + + var pos: Long = 0 + + var fetchStart: Long = 0 + + var bufferedRows: Array[Row] = new Array[Row](0) + + var hasNext: Boolean = true + + val FETCH_INTERVAL_MS: Long = 1000 + + private val executor = Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder().setNameFormat("flink-query-iterator-%d").setDaemon(true).build) + + implicit private val executionContext: ExecutionContextExecutor = + ExecutionContext.fromExecutor(executor) + + /** + * Begin a fetch block, forward from the current position. + * + * Throws TimeoutException if no data is fetched within the timeout. + */ + override def fetchNext(): Unit = { + if (!hasNext) { + return + } + val future = Future(() -> { + var fetched = false + // if no timeout is set, this would block until some rows are fetched + debug(s"Fetching from result store with timeout $resultFetchTimeout ms") + while (!fetched && !Thread.interrupted()) { + val rs = resultFetcher.fetchResults(token, maxRows - bufferedRows.length) + val flinkRs = new FlinkResultSet(rs) + // TODO: replace string-based match when Flink 1.16 support is dropped + flinkRs.getResultType.name() match { + case "EOS" => + debug("EOS received, no more data to fetch.") + fetched = true + hasNext = false + case "NOT_READY" => + // if flink jobs are not ready, continue to retry + debug("Result not ready, retrying...") + case "PAYLOAD" => + val fetchedData = flinkRs.getData + // if no data fetched, continue to retry + if (!fetchedData.isEmpty) { + debug(s"Fetched ${fetchedData.length} rows from result store.") + fetched = true + bufferedRows ++= fetchedData.map(rd => convertToRow(rd, dataTypes.toList)) + fetchStart = pos + } else { + debug("No data fetched, retrying...") + } + case _ => + throw new RuntimeException(s"Unexpected result type: ${flinkRs.getResultType}") + } + if (hasNext) { + val nextToken = flinkRs.getNextToken + if (nextToken == null) { + hasNext = false + } else { + token = nextToken + } + } + Thread.sleep(FETCH_INTERVAL_MS) + } + }) + Await.result(future, resultFetchTimeout) + } + + /** + * Begin a fetch block, moving the iterator to the given position. + * Resets the fetch start offset. + * + * @param pos index to move a position of iterator. 
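+ *            Only positions within the already-buffered rows are reachable; skipping forward past the buffered data throws IllegalArgumentException.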
+ */ + override def fetchAbsolute(pos: Long): Unit = { + val effectivePos = Math.max(pos, 0) + if (effectivePos < bufferedRows.length) { + this.fetchStart = effectivePos + return + } + throw new IllegalArgumentException(s"Cannot skip to an unreachable position $effectivePos.") + } + + override def getFetchStart: Long = fetchStart + + override def getPosition: Long = pos + + /** + * @return returns row if any and null if no more rows can be fetched. + */ + override def next(): Row = { + if (pos < bufferedRows.length) { + debug(s"Fetching from buffered rows at pos $pos.") + val row = bufferedRows(pos.toInt) + pos += 1 + if (pos >= maxRows) { + hasNext = false + } + row + } else { + // block until some rows are fetched or TimeoutException is thrown + fetchNext() + if (hasNext) { + val row = bufferedRows(pos.toInt) + pos += 1 + if (pos >= maxRows) { + hasNext = false + } + row + } else { + null + } + } + } + + def close(): Unit = { + resultFetcher.close() + executor.shutdown() + } + + private[this] def convertToRow(r: RowData, dataTypes: List[DataType]): Row = { + val converter = DataStructureConverters.getConverter(DataTypes.ROW(dataTypes: _*)) + converter.toExternal(r).asInstanceOf[Row] + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala new file mode 100644 index 00000000000..b8d407297ac --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink.result + +import java.util + +import scala.collection.JavaConverters._ + +import com.google.common.collect.Iterators +import org.apache.flink.api.common.JobID +import org.apache.flink.table.api.{DataTypes, ResultKind} +import org.apache.flink.table.catalog.Column +import org.apache.flink.types.Row + +import org.apache.kyuubi.operation.{ArrayFetchIterator, FetchIterator} + +case class ResultSet( + resultKind: ResultKind, + columns: util.List[Column], + data: FetchIterator[Row], + // null in batch mode + // list of boolean in streaming mode, + // true if the corresponding row is an append row, false if its a retract row + changeFlags: Option[util.List[Boolean]]) { + + require(resultKind != null, "resultKind must not be null") + require(columns != null, "columns must not be null") + require(data != null, "data must not be null") + changeFlags.foreach { flags => + require( + Iterators.size(data.asInstanceOf[util.Iterator[_]]) == flags.size, + "the size of data and the size of changeFlags should be equal") + } + + def getColumns: util.List[Column] = columns + + def getData: FetchIterator[Row] = data + + def close: Unit = { + data match { + case queryIte: QueryResultFetchIterator => queryIte.close() + case _ => + } + } +} + +/** + * A set of one statement execution result containing result kind, columns, rows of data and change + * flags for streaming mode. + */ +object ResultSet { + + def fromJobId(jobID: JobID): ResultSet = { + val data: Array[Row] = if (jobID != null) { + Array(Row.of(jobID.toString)) + } else { + // should not happen + Array(Row.of("(Empty Job ID)")) + } + builder + .resultKind(ResultKind.SUCCESS_WITH_CONTENT) + .columns(Column.physical("result", DataTypes.STRING())) + .data(data) + .build + } + + def builder: Builder = new ResultSet.Builder + + class Builder { + private var resultKind: ResultKind = _ + private var columns: util.List[Column] = _ + private var data: FetchIterator[Row] = _ + private var changeFlags: Option[util.List[Boolean]] = None + + def resultKind(resultKind: ResultKind): ResultSet.Builder = { + this.resultKind = resultKind + this + } + + def columns(columns: Column*): ResultSet.Builder = { + this.columns = columns.asJava + this + } + + def columns(columns: util.List[Column]): ResultSet.Builder = { + this.columns = columns + this + } + + def data(data: FetchIterator[Row]): ResultSet.Builder = { + this.data = data + this + } + + def data(data: Array[Row]): ResultSet.Builder = { + this.data = new ArrayFetchIterator[Row](data) + this + } + + def changeFlags(changeFlags: util.List[Boolean]): ResultSet.Builder = { + this.changeFlags = Some(changeFlags) + this + } + + def build: ResultSet = new ResultSet(resultKind, columns, data, changeFlags) + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala index ded271cf1d7..8b722f1e5e9 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala @@ -15,11 +15,14 @@ * limitations under the License. 
*/ -package org.apache.kyuubi.engine.flink.result; +package org.apache.kyuubi.engine.flink.result + +import scala.concurrent.duration.Duration import org.apache.flink.table.api.DataTypes import org.apache.flink.table.api.ResultKind import org.apache.flink.table.catalog.Column +import org.apache.flink.table.gateway.service.result.ResultFetcher import org.apache.flink.types.Row /** Utility object for building ResultSet. */ @@ -54,4 +57,20 @@ object ResultSetUtil { .columns(Column.physical("result", DataTypes.STRING)) .data(Array[Row](Row.of("OK"))) .build + + def fromResultFetcher( + resultFetcher: ResultFetcher, + maxRows: Int, + resultFetchTimeout: Duration): ResultSet = { + if (maxRows <= 0) { + throw new IllegalArgumentException("maxRows should be positive") + } + val schema = resultFetcher.getResultSchema + val ite = new QueryResultFetchIterator(resultFetcher, maxRows, resultFetchTimeout) + ResultSet.builder + .resultKind(ResultKind.SUCCESS_WITH_CONTENT) + .columns(schema.getColumns) + .data(ite) + .build + } } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/schema/RowSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/schema/RowSet.scala index 2b3ae50b76e..c446396d5bb 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/schema/RowSet.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/schema/RowSet.scala @@ -21,7 +21,9 @@ import java.{lang, util} import java.nio.ByteBuffer import java.nio.charset.StandardCharsets import java.sql.{Date, Timestamp} -import java.time.{LocalDate, LocalDateTime} +import java.time.{Instant, LocalDate, LocalDateTime, ZonedDateTime, ZoneId} +import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder, TextStyle} +import java.time.temporal.ChronoField import java.util.Collections import scala.collection.JavaConverters._ @@ -42,15 +44,16 @@ object RowSet { def resultSetToTRowSet( rows: Seq[Row], resultSet: ResultSet, + zoneId: ZoneId, protocolVersion: TProtocolVersion): TRowSet = { if (protocolVersion.getValue < TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6.getValue) { - toRowBaseSet(rows, resultSet) + toRowBaseSet(rows, resultSet, zoneId) } else { - toColumnBasedSet(rows, resultSet) + toColumnBasedSet(rows, resultSet, zoneId) } } - def toRowBaseSet(rows: Seq[Row], resultSet: ResultSet): TRowSet = { + def toRowBaseSet(rows: Seq[Row], resultSet: ResultSet, zoneId: ZoneId): TRowSet = { val rowSize = rows.size val tRows = new util.ArrayList[TRow](rowSize) var i = 0 @@ -60,7 +63,7 @@ object RowSet { val columnSize = row.getArity var j = 0 while (j < columnSize) { - val columnValue = toTColumnValue(j, row, resultSet) + val columnValue = toTColumnValue(j, row, resultSet, zoneId) tRow.addToColVals(columnValue) j += 1 } @@ -71,14 +74,14 @@ object RowSet { new TRowSet(0, tRows) } - def toColumnBasedSet(rows: Seq[Row], resultSet: ResultSet): TRowSet = { + def toColumnBasedSet(rows: Seq[Row], resultSet: ResultSet, zoneId: ZoneId): TRowSet = { val size = rows.length val tRowSet = new TRowSet(0, new util.ArrayList[TRow](size)) val columnSize = resultSet.getColumns.size() var i = 0 while (i < columnSize) { val field = resultSet.getColumns.get(i) - val tColumn = toTColumn(rows, i, field.getDataType.getLogicalType) + val tColumn = toTColumn(rows, i, field.getDataType.getLogicalType, zoneId) tRowSet.addToColumns(tColumn) i += 1 } @@ -88,7 +91,8 @@ object RowSet { private def toTColumnValue( ordinal: Int, 
row: Row, - resultSet: ResultSet): TColumnValue = { + resultSet: ResultSet, + zoneId: ZoneId): TColumnValue = { val column = resultSet.getColumns.get(ordinal) val logicalType = column.getDataType.getLogicalType @@ -153,6 +157,12 @@ object RowSet { s"for type ${t.getClass}.") } TColumnValue.stringVal(tStringValue) + case _: LocalZonedTimestampType => + val tStringValue = new TStringValue + val fieldValue = row.getField(ordinal) + tStringValue.setValue(TIMESTAMP_LZT_FORMATTER.format( + ZonedDateTime.ofInstant(fieldValue.asInstanceOf[Instant], zoneId))) + TColumnValue.stringVal(tStringValue) case t => val tStringValue = new TStringValue if (row.getField(ordinal) != null) { @@ -166,7 +176,11 @@ object RowSet { ByteBuffer.wrap(bitSet.toByteArray) } - private def toTColumn(rows: Seq[Row], ordinal: Int, logicalType: LogicalType): TColumn = { + private def toTColumn( + rows: Seq[Row], + ordinal: Int, + logicalType: LogicalType, + zoneId: ZoneId): TColumn = { val nulls = new java.util.BitSet() // for each column, determine the conversion class by sampling the first non-value value // if there's no row, set the entire column empty @@ -211,6 +225,12 @@ object RowSet { s"for type ${t.getClass}.") } TColumn.stringVal(new TStringColumn(values, nulls)) + case _: LocalZonedTimestampType => + val values = getOrSetAsNull[Instant](rows, ordinal, nulls, Instant.EPOCH) + .toArray().map(v => + TIMESTAMP_LZT_FORMATTER.format( + ZonedDateTime.ofInstant(v.asInstanceOf[Instant], zoneId))) + TColumn.stringVal(new TStringColumn(values.toList.asJava, nulls)) case _ => var i = 0 val rowSize = rows.length @@ -303,11 +323,14 @@ object RowSet { case _: DecimalType => TTypeId.DECIMAL_TYPE case _: DateType => TTypeId.DATE_TYPE case _: TimestampType => TTypeId.TIMESTAMP_TYPE + case _: LocalZonedTimestampType => TTypeId.TIMESTAMPLOCALTZ_TYPE case _: ArrayType => TTypeId.ARRAY_TYPE case _: MapType => TTypeId.MAP_TYPE case _: RowType => TTypeId.STRUCT_TYPE case _: BinaryType => TTypeId.BINARY_TYPE - case t @ (_: ZonedTimestampType | _: LocalZonedTimestampType | _: MultisetType | + case _: VarBinaryType => TTypeId.BINARY_TYPE + case _: TimeType => TTypeId.STRING_TYPE + case t @ (_: ZonedTimestampType | _: MultisetType | _: YearMonthIntervalType | _: DayTimeIntervalType) => throw new IllegalArgumentException( "Flink data type `%s` is not supported currently".format(t.asSummaryString()), @@ -368,11 +391,33 @@ object RowSet { // Only match string in nested type values "\"" + s + "\"" - case (bin: Array[Byte], _: BinaryType) => + case (bin: Array[Byte], _ @(_: BinaryType | _: VarBinaryType)) => new String(bin, StandardCharsets.UTF_8) case (other, _) => other.toString } } + + /** should stay in sync with org.apache.kyuubi.jdbc.hive.common.TimestampTZUtil */ + var TIMESTAMP_LZT_FORMATTER: DateTimeFormatter = { + val builder = new DateTimeFormatterBuilder + // Date part + builder.append(DateTimeFormatter.ofPattern("yyyy-MM-dd")) + // Time part + builder + .optionalStart + .appendLiteral(" ") + .append(DateTimeFormatter.ofPattern("HH:mm:ss")) + .optionalStart + .appendFraction(ChronoField.NANO_OF_SECOND, 1, 9, true) + .optionalEnd + .optionalEnd + + // Zone part + builder.optionalStart.appendLiteral(" ").optionalEnd + builder.optionalStart.appendZoneText(TextStyle.NARROW).optionalEnd + + builder.toFormatter + } } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala 
b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala index 07971e39fae..b7cd462172f 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala @@ -17,12 +17,17 @@ package org.apache.kyuubi.engine.flink.session -import org.apache.flink.table.client.gateway.context.DefaultContext -import org.apache.flink.table.client.gateway.local.LocalExecutor +import scala.collection.JavaConverters._ +import scala.collection.JavaConverters.mapAsJavaMap + +import org.apache.flink.table.gateway.api.session.SessionEnvironment +import org.apache.flink.table.gateway.rest.util.SqlGatewayRestAPIVersion +import org.apache.flink.table.gateway.service.context.DefaultContext import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY import org.apache.kyuubi.engine.flink.operation.FlinkSQLOperationManager +import org.apache.kyuubi.engine.flink.shim.FlinkSessionManager import org.apache.kyuubi.session.{Session, SessionHandle, SessionManager} class FlinkSQLSessionManager(engineContext: DefaultContext) @@ -31,11 +36,11 @@ class FlinkSQLSessionManager(engineContext: DefaultContext) override protected def isServer: Boolean = false val operationManager = new FlinkSQLOperationManager() - val executor = new LocalExecutor(engineContext) + val sessionManager = new FlinkSessionManager(engineContext) override def start(): Unit = { super.start() - executor.start() + sessionManager.start() } override protected def createSession( @@ -46,19 +51,40 @@ class FlinkSQLSessionManager(engineContext: DefaultContext) conf: Map[String, String]): Session = { conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).flatMap( getSessionOption).getOrElse { - new FlinkSessionImpl( + val flinkInternalSession = sessionManager.openSession( + SessionEnvironment.newBuilder + .setSessionEndpointVersion(SqlGatewayRestAPIVersion.V1) + .addSessionConfig(mapAsJavaMap(conf)) + .build) + val sessionConfig = flinkInternalSession.getSessionConfig + sessionConfig.putAll(conf.asJava) + val session = new FlinkSessionImpl( protocol, user, password, ipAddress, conf, this, - executor) + flinkInternalSession) + session } } + override def getSessionOption(sessionHandle: SessionHandle): Option[Session] = { + val session = super.getSessionOption(sessionHandle) + session.foreach(s => s.asInstanceOf[FlinkSessionImpl].fSession.touch()) + session + } + override def closeSession(sessionHandle: SessionHandle): Unit = { + val fSession = super.getSessionOption(sessionHandle) + fSession.foreach(s => + sessionManager.closeSession(s.asInstanceOf[FlinkSessionImpl].fSession.getSessionHandle)) super.closeSession(sessionHandle) - executor.closeSession(sessionHandle.toString) + } + + override def stop(): Unit = synchronized { + sessionManager.stop() + super.stop() } } diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala index 75087b48ca2..b8d1f85692b 100644 --- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala +++ 
b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala @@ -19,16 +19,19 @@ package org.apache.kyuubi.engine.flink.session import scala.util.control.NonFatal +import org.apache.flink.configuration.Configuration import org.apache.flink.runtime.util.EnvironmentInformation import org.apache.flink.table.client.gateway.SqlExecutionException -import org.apache.flink.table.client.gateway.context.SessionContext -import org.apache.flink.table.client.gateway.local.LocalExecutor +import org.apache.flink.table.gateway.api.operation.OperationHandle +import org.apache.flink.table.gateway.service.context.SessionContext +import org.apache.flink.table.gateway.service.session.{Session => FSession} import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion} import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY import org.apache.kyuubi.engine.flink.FlinkEngineUtils -import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} +import org.apache.kyuubi.engine.flink.udf.KDFRegistry +import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager, USE_CATALOG, USE_DATABASE} class FlinkSessionImpl( protocol: TProtocolVersion, @@ -37,16 +40,19 @@ class FlinkSessionImpl( ipAddress: String, conf: Map[String, String], sessionManager: SessionManager, - val executor: LocalExecutor) + val fSession: FSession) extends AbstractSession(protocol, user, password, ipAddress, conf, sessionManager) { override val handle: SessionHandle = - conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).getOrElse(SessionHandle()) + conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID) + .getOrElse(SessionHandle.fromUUID(fSession.getSessionHandle.getIdentifier.toString)) - lazy val sessionContext: SessionContext = { - FlinkEngineUtils.getSessionContext(executor, handle.identifier.toString) + val sessionContext: SessionContext = { + FlinkEngineUtils.getSessionContext(fSession) } + KDFRegistry.registerAll(sessionContext) + private def setModifiableConfig(key: String, value: String): Unit = { try { sessionContext.set(key, value) @@ -56,26 +62,33 @@ class FlinkSessionImpl( } override def open(): Unit = { - executor.openSession(handle.identifier.toString) - normalizedConf.foreach { - case ("use:catalog", catalog) => - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - try { - tableEnv.useCatalog(catalog) - } catch { - case NonFatal(e) => + val executor = fSession.createExecutor(Configuration.fromMap(fSession.getSessionConfig)) + + val (useCatalogAndDatabaseConf, otherConf) = normalizedConf.partition { case (k, _) => + Array(USE_CATALOG, USE_DATABASE).contains(k) + } + + useCatalogAndDatabaseConf.get(USE_CATALOG).foreach { catalog => + try { + executor.executeStatement(OperationHandle.create, s"USE CATALOG $catalog") + } catch { + case NonFatal(e) => + throw e + } + } + + useCatalogAndDatabaseConf.get("use:database").foreach { database => + try { + executor.executeStatement(OperationHandle.create, s"USE $database") + } catch { + case NonFatal(e) => + if (database != "default") { throw e - } - case ("use:database", database) => - val tableEnv = sessionContext.getExecutionContext.getTableEnvironment - try { - tableEnv.useDatabase(database) - } catch { - case NonFatal(e) => - if (database != "default") { - throw e - } - } + } + } + } + + otherConf.foreach { case (key, value) => setModifiableConfig(key, value) } 
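+ // A minimal sketch of the conf handling above, with illustrative values
+ // ("use:catalog" / "use:database" are the actual selector keys used here; the Flink
+ // option and the values below are only hypothetical examples):
+ //   val normalizedConf = Map(
+ //     "use:catalog" -> "hive_catalog",                    // executed as: USE CATALOG hive_catalog
+ //     "use:database" -> "sales",                          // executed as: USE sales
+ //     "table.exec.resource.default-parallelism" -> "2")   // applied via setModifiableConfig(...)
+ //   val (useCatalogAndDatabaseConf, otherConf) =
+ //     normalizedConf.partition { case (k, _) => Array(USE_CATALOG, USE_DATABASE).contains(k) }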
super.open() diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala new file mode 100644 index 00000000000..7fb05c8446b --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink.shim + +import java.lang.{Long => JLong} +import java.util + +import org.apache.flink.table.data.RowData +import org.apache.flink.table.gateway.api.results.ResultSet.ResultType + +import org.apache.kyuubi.util.reflect.ReflectUtils._ + +class FlinkResultSet(resultSet: AnyRef) { + + def getData: util.List[RowData] = invokeAs(resultSet, "getData") + + def getNextToken: JLong = invokeAs(resultSet, "getNextToken") + + def getResultType: ResultType = invokeAs(resultSet, "getResultType") +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala new file mode 100644 index 00000000000..89414ac4c54 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink.shim + +import org.apache.flink.table.gateway.api.session.{SessionEnvironment, SessionHandle} +import org.apache.flink.table.gateway.service.context.DefaultContext +import org.apache.flink.table.gateway.service.session.Session + +import org.apache.kyuubi.engine.flink.FlinkEngineUtils.FLINK_RUNTIME_VERSION +import org.apache.kyuubi.util.reflect._ +import org.apache.kyuubi.util.reflect.ReflectUtils._ + +class FlinkSessionManager(engineContext: DefaultContext) { + + val sessionManager: AnyRef = { + if (FLINK_RUNTIME_VERSION === "1.16") { + DynConstructors.builder().impl( + "org.apache.flink.table.gateway.service.session.SessionManager", + classOf[DefaultContext]) + .build() + .newInstance(engineContext) + } else { + DynConstructors.builder().impl( + "org.apache.flink.table.gateway.service.session.SessionManagerImpl", + classOf[DefaultContext]) + .build() + .newInstance(engineContext) + } + } + + def start(): Unit = invokeAs(sessionManager, "start") + + def stop(): Unit = invokeAs(sessionManager, "stop") + + def getSession(sessionHandle: SessionHandle): Session = + invokeAs(sessionManager, "getSession", (classOf[SessionHandle], sessionHandle)) + + def openSession(environment: SessionEnvironment): Session = + invokeAs(sessionManager, "openSession", (classOf[SessionEnvironment], environment)) + + def closeSession(sessionHandle: SessionHandle): Unit = + invokeAs(sessionManager, "closeSession", (classOf[SessionHandle], sessionHandle)) +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KDFRegistry.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KDFRegistry.scala new file mode 100644 index 00000000000..9ccbe7940d0 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KDFRegistry.scala @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink.udf + +import java.util + +import scala.collection.mutable.ArrayBuffer + +import org.apache.flink.configuration.Configuration +import org.apache.flink.table.functions.{ScalarFunction, UserDefinedFunction} +import org.apache.flink.table.gateway.service.context.SessionContext + +import org.apache.kyuubi.{KYUUBI_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_NAME, KYUUBI_SESSION_USER_KEY} +import org.apache.kyuubi.engine.flink.FlinkEngineUtils.FLINK_RUNTIME_VERSION +import org.apache.kyuubi.util.reflect.DynMethods + +object KDFRegistry { + + def createKyuubiDefinedFunctions(sessionContext: SessionContext): Array[KyuubiDefinedFunction] = { + + val kyuubiDefinedFunctions = new ArrayBuffer[KyuubiDefinedFunction] + + val flinkConfigMap: util.Map[String, String] = { + if (FLINK_RUNTIME_VERSION === "1.16") { + DynMethods + .builder("getConfigMap") + .impl(classOf[SessionContext]) + .build() + .invoke(sessionContext) + .asInstanceOf[util.Map[String, String]] + } else { + DynMethods + .builder("getSessionConf") + .impl(classOf[SessionContext]) + .build() + .invoke(sessionContext) + .asInstanceOf[Configuration] + .toMap + } + } + + val kyuubi_version: KyuubiDefinedFunction = create( + "kyuubi_version", + new KyuubiVersionFunction(flinkConfigMap), + "Return the version of Kyuubi Server", + "string", + "1.8.0") + kyuubiDefinedFunctions += kyuubi_version + + val engineName: KyuubiDefinedFunction = create( + "kyuubi_engine_name", + new EngineNameFunction(flinkConfigMap), + "Return the application name for the associated query engine", + "string", + "1.8.0") + kyuubiDefinedFunctions += engineName + + val engineId: KyuubiDefinedFunction = create( + "kyuubi_engine_id", + new EngineIdFunction(flinkConfigMap), + "Return the application id for the associated query engine", + "string", + "1.8.0") + kyuubiDefinedFunctions += engineId + + val systemUser: KyuubiDefinedFunction = create( + "kyuubi_system_user", + new SystemUserFunction(flinkConfigMap), + "Return the system user name for the associated query engine", + "string", + "1.8.0") + kyuubiDefinedFunctions += systemUser + + val sessionUser: KyuubiDefinedFunction = create( + "kyuubi_session_user", + new SessionUserFunction(flinkConfigMap), + "Return the session username for the associated query engine", + "string", + "1.8.0") + kyuubiDefinedFunctions += sessionUser + + kyuubiDefinedFunctions.toArray + } + + def create( + name: String, + udf: UserDefinedFunction, + description: String, + returnType: String, + since: String): KyuubiDefinedFunction = { + val kdf = KyuubiDefinedFunction(name, udf, description, returnType, since) + kdf + } + + def registerAll(sessionContext: SessionContext): Unit = { + val functions = createKyuubiDefinedFunctions(sessionContext) + for (func <- functions) { + sessionContext.getSessionState.functionCatalog + .registerTemporarySystemFunction(func.name, func.udf, true) + } + } +} + +class KyuubiVersionFunction(confMap: util.Map[String, String]) extends ScalarFunction { + def eval(): String = KYUUBI_VERSION +} + +class EngineNameFunction(confMap: util.Map[String, String]) extends ScalarFunction { + def eval(): String = { + confMap match { + case m if m.containsKey("yarn.application.name") => m.get("yarn.application.name") + case m if m.containsKey("kubernetes.cluster-id") => m.get("kubernetes.cluster-id") + case m => m.getOrDefault(KYUUBI_ENGINE_NAME, "unknown-engine-name") + } + } +} + +class EngineIdFunction(confMap: util.Map[String, String]) extends 
ScalarFunction { + def eval(): String = { + confMap match { + case m if m.containsKey("yarn.application.id") => m.get("yarn.application.id") + case m if m.containsKey("kubernetes.cluster-id") => m.get("kubernetes.cluster-id") + case m => m.getOrDefault("high-availability.cluster-id", "unknown-engine-id") + } + } +} + +class SystemUserFunction(confMap: util.Map[String, String]) extends ScalarFunction { + def eval(): String = Utils.currentUser +} + +class SessionUserFunction(confMap: util.Map[String, String]) extends ScalarFunction { + def eval(): String = confMap.getOrDefault(KYUUBI_SESSION_USER_KEY, "unknown-user") +} diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KyuubiDefinedFunction.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KyuubiDefinedFunction.scala new file mode 100644 index 00000000000..5cfce86d6e0 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/udf/KyuubiDefinedFunction.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink.udf + +import org.apache.flink.table.functions.UserDefinedFunction + +/** + * A wrapper for Flink's [[UserDefinedFunction]] + * + * @param name function name + * @param udf user-defined function + * @param description function description + */ +case class KyuubiDefinedFunction( + name: String, + udf: UserDefinedFunction, + description: String, + returnType: String, + since: String) diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala new file mode 100644 index 00000000000..c352429eadc --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink + +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryClientProvider} + +trait WithDiscoveryFlinkSQLEngine { + + protected def namespace: String + + protected def conf: KyuubiConf + + def withDiscoveryClient(f: DiscoveryClient => Unit): Unit = { + DiscoveryClientProvider.withDiscoveryClient(conf)(f) + } + + def getFlinkEngineServiceUrl: String = { + var hostPort: Option[(String, Int)] = None + var retries = 0 + while (hostPort.isEmpty && retries < 10) { + withDiscoveryClient(client => hostPort = client.getServerHost(namespace)) + retries += 1 + Thread.sleep(1000L) + } + if (hostPort.isEmpty) { + throw new RuntimeException("Timed out retrieving the Flink engine service URL.") + } + // delay the access to the thrift service because it + // may not be ready even though it's registered + Thread.sleep(3000L) + s"jdbc:hive2://${hostPort.get._1}:${hostPort.get._2}" + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala deleted file mode 100644 index fbfb8df29ac..00000000000 --- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.kyuubi.engine.flink - -import scala.collection.JavaConverters._ - -import org.apache.flink.client.cli.{CustomCommandLine, DefaultCLI} -import org.apache.flink.configuration.{Configuration, RestOptions} -import org.apache.flink.runtime.minicluster.{MiniCluster, MiniClusterConfiguration} -import org.apache.flink.table.client.gateway.context.DefaultContext - -import org.apache.kyuubi.{KyuubiFunSuite, Utils} -import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar - -trait WithFlinkSQLEngine extends KyuubiFunSuite { - - protected val flinkConfig = new Configuration() - protected var miniCluster: MiniCluster = _ - protected var engine: FlinkSQLEngine = _ - // conf will be loaded until start flink engine - def withKyuubiConf: Map[String, String] - val kyuubiConf: KyuubiConf = FlinkSQLEngine.kyuubiConf - - protected var connectionUrl: String = _ - - protected val GENERATED_UDF_CLASS: String = "LowerUDF" - - protected val GENERATED_UDF_CODE: String = - s""" - public class $GENERATED_UDF_CLASS extends org.apache.flink.table.functions.ScalarFunction { - public String eval(String str) { - return str.toLowerCase(); - } - } - """ - - override def beforeAll(): Unit = { - startMiniCluster() - startFlinkEngine() - super.beforeAll() - } - - override def afterAll(): Unit = { - super.afterAll() - stopFlinkEngine() - miniCluster.close() - } - - def startFlinkEngine(): Unit = { - withKyuubiConf.foreach { case (k, v) => - System.setProperty(k, v) - kyuubiConf.set(k, v) - } - val udfJar = TestUserClassLoaderJar.createJarFile( - Utils.createTempDir("test-jar").toFile, - "test-classloader-udf.jar", - GENERATED_UDF_CLASS, - GENERATED_UDF_CODE) - val engineContext = new DefaultContext( - List(udfJar.toURI.toURL).asJava, - flinkConfig, - List[CustomCommandLine](new DefaultCLI).asJava) - FlinkSQLEngine.startEngine(engineContext) - engine = FlinkSQLEngine.currentEngine.get - connectionUrl = engine.frontendServices.head.connectionUrl - } - - def stopFlinkEngine(): Unit = { - if (engine != null) { - engine.stop() - engine = null - } - } - - private def startMiniCluster(): Unit = { - val cfg = new MiniClusterConfiguration.Builder() - .setConfiguration(flinkConfig) - .setNumSlotsPerTaskManager(1) - .build - miniCluster = new MiniCluster(cfg) - miniCluster.start() - flinkConfig.setString(RestOptions.ADDRESS, miniCluster.getRestAddress.get().getHost) - flinkConfig.setInteger(RestOptions.PORT, miniCluster.getRestAddress.get().getPort) - } - - protected def getJdbcUrl: String = s"jdbc:hive2://$connectionUrl/;" - -} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala new file mode 100644 index 00000000000..92c1bcd83fc --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink + +import java.io.{File, FilenameFilter} +import java.lang.ProcessBuilder.Redirect +import java.net.URI +import java.nio.file.{Files, Paths} + +import scala.collection.JavaConverters._ +import scala.collection.mutable.ArrayBuffer + +import org.apache.flink.configuration.{Configuration, RestOptions} +import org.apache.flink.runtime.minicluster.{MiniCluster, MiniClusterConfiguration} + +import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiException, KyuubiFunSuite, SCALA_COMPILE_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ADDRESSES +import org.apache.kyuubi.zookeeper.EmbeddedZookeeper +import org.apache.kyuubi.zookeeper.ZookeeperConf.{ZK_CLIENT_PORT, ZK_CLIENT_PORT_ADDRESS} + +trait WithFlinkSQLEngineLocal extends KyuubiFunSuite with WithFlinkTestResources { + + protected val flinkConfig = new Configuration() + + protected var miniCluster: MiniCluster = _ + + protected var engineProcess: Process = _ + + private var zkServer: EmbeddedZookeeper = _ + + protected val conf: KyuubiConf = FlinkSQLEngine.kyuubiConf + + protected def engineRefId: String + + def withKyuubiConf: Map[String, String] + + protected var connectionUrl: String = _ + + override def beforeAll(): Unit = { + withKyuubiConf.foreach { case (k, v) => + if (k.startsWith("flink.")) { + flinkConfig.setString(k.stripPrefix("flink."), v) + } + } + withKyuubiConf.foreach { case (k, v) => + System.setProperty(k, v) + conf.set(k, v) + } + + zkServer = new EmbeddedZookeeper() + conf.set(ZK_CLIENT_PORT, 0).set(ZK_CLIENT_PORT_ADDRESS, "localhost") + zkServer.initialize(conf) + zkServer.start() + conf.set(HA_ADDRESSES, zkServer.getConnectString) + + val envs = scala.collection.mutable.Map[String, String]() + val kyuubiExternals = Utils.getCodeSourceLocation(getClass) + .split("externals").head + val flinkHome = { + val candidates = Paths.get(kyuubiExternals, "externals", "kyuubi-download", "target") + .toFile.listFiles(f => f.getName.contains("flink")) + if (candidates == null) None else candidates.map(_.toPath).headOption + } + if (flinkHome.isDefined) { + envs("FLINK_HOME") = flinkHome.get.toString + envs("FLINK_CONF_DIR") = Paths.get(flinkHome.get.toString, "conf").toString + } + envs("JAVA_HOME") = System.getProperty("java.home") + envs("JAVA_EXEC") = Paths.get(envs("JAVA_HOME"), "bin", "java").toString + + startMiniCluster() + startFlinkEngine(envs.toMap) + super.beforeAll() + } + + override def afterAll(): Unit = { + super.afterAll() + if (engineProcess != null) { + engineProcess.destroy() + engineProcess = null + } + if (miniCluster != null) { + miniCluster.close() + miniCluster = null + } + if (zkServer != null) { + zkServer.stop() + zkServer = null + } + } + + def startFlinkEngine(envs: Map[String, String]): Unit = { + val flinkHome = envs("FLINK_HOME") + val processBuilder: ProcessBuilder = new ProcessBuilder + processBuilder.environment().putAll(envs.asJava) + + conf.set(ENGINE_FLINK_EXTRA_CLASSPATH, udfJar.getAbsolutePath) + val command = new ArrayBuffer[String]() 
+ + command += envs("JAVA_EXEC") + + val memory = conf.get(ENGINE_FLINK_MEMORY) + command += s"-Xmx$memory" + val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS) + if (javaOptions.isDefined) { + command += javaOptions.get + } + + command += "-cp" + val classpathEntries = new java.util.LinkedHashSet[String] + // flink engine runtime jar + mainResource(envs).foreach(classpathEntries.add) + // flink sql jars + Paths.get(flinkHome) + .resolve("opt") + .toFile + .listFiles(new FilenameFilter { + override def accept(dir: File, name: String): Boolean = { + name.toLowerCase.startsWith("flink-sql-client") || + name.toLowerCase.startsWith("flink-sql-gateway") + } + }).foreach(jar => classpathEntries.add(jar.getAbsolutePath)) + + // jars from flink lib + classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*") + + // classpath contains flink configurations, default to flink.home/conf + classpathEntries.add(envs.getOrElse("FLINK_CONF_DIR", "")) + // classpath contains hadoop configurations + val cp = System.getProperty("java.class.path") + // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory + // which can't be initialized on the client side + val hadoopJars = cp.split(":").filter(s => !s.contains("flink")) + hadoopJars.foreach(classpathEntries.add) + val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH) + extraCp.foreach(classpathEntries.add) + if (hadoopJars.isEmpty && extraCp.isEmpty) { + mainResource(envs).foreach { path => + val devHadoopJars = Paths.get(path).getParent + .resolve(s"scala-$SCALA_COMPILE_VERSION") + .resolve("jars") + if (!Files.exists(devHadoopJars)) { + throw new KyuubiException(s"The path $devHadoopJars does not exist. " + + s"Please set FLINK_HADOOP_CLASSPATH or ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" + + s" to configure the location of the hadoop client jars, etc.") + } + classpathEntries.add(s"$devHadoopJars${File.separator}*") + } + } + command += classpathEntries.asScala.mkString(File.pathSeparator) + command += "org.apache.kyuubi.engine.flink.FlinkSQLEngine" + + conf.getAll.foreach { case (k, v) => + command += "--conf" + command += s"$k=$v" + } + + processBuilder.command(command.toList.asJava) + processBuilder.redirectOutput(Redirect.INHERIT) + processBuilder.redirectError(Redirect.INHERIT) + + info(s"starting flink local engine...") + engineProcess = processBuilder.start() + } + + private def startMiniCluster(): Unit = { + val cfg = new MiniClusterConfiguration.Builder() + .setConfiguration(flinkConfig) + .setNumSlotsPerTaskManager(1) + .setNumTaskManagers(2) + .build + miniCluster = new MiniCluster(cfg) + miniCluster.start() + flinkConfig.setString(RestOptions.ADDRESS, miniCluster.getRestAddress.get().getHost) + flinkConfig.setInteger(RestOptions.PORT, miniCluster.getRestAddress.get().getPort) + } + + protected def getJdbcUrl: String = s"jdbc:hive2://$connectionUrl/;" + + def mainResource(env: Map[String, String]): Option[String] = { + val module = "kyuubi-flink-sql-engine" + val shortName = "flink" + // 1. get the main resource jar from the user-specified config first + val jarName = s"${module}_$SCALA_COMPILE_VERSION-$KYUUBI_VERSION.jar" + conf.getOption(s"kyuubi.session.engine.$shortName.main.resource").filter { + userSpecified => + // skip the existence check if it's not a local file. + val uri = new URI(userSpecified) + val schema = if (uri.getScheme != null) uri.getScheme else "file" + schema match { + case "file" => Files.exists(Paths.get(userSpecified)) + case _ => true + } + }.orElse { + // 2.
get the main resource jar from system build default + env.get(KYUUBI_HOME).toSeq + .flatMap { p => + Seq( + Paths.get(p, "externals", "engines", shortName, jarName), + Paths.get(p, "externals", module, "target", jarName)) + } + .find(Files.exists(_)).map(_.toAbsolutePath.toFile.getCanonicalPath) + }.orElse { + // 3. get the main resource from dev environment + val cwd = Utils.getCodeSourceLocation(getClass).split("externals") + assert(cwd.length > 1) + Option(Paths.get(cwd.head, "externals", module, "target", jarName)) + .map(_.toAbsolutePath.toFile.getCanonicalPath) + } + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala new file mode 100644 index 00000000000..49fb947a3ec --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink + +import java.io.{File, FilenameFilter, FileWriter} +import java.lang.ProcessBuilder.Redirect +import java.net.URI +import java.nio.file.{Files, Paths} + +import scala.collection.JavaConverters._ +import scala.collection.mutable.{ArrayBuffer, ListBuffer} + +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.hdfs.MiniDFSCluster +import org.apache.hadoop.yarn.conf.YarnConfiguration +import org.apache.hadoop.yarn.server.MiniYARNCluster + +import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiFunSuite, SCALA_COMPILE_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_APPLICATION_JARS, KYUUBI_HOME} +import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ADDRESSES +import org.apache.kyuubi.zookeeper.EmbeddedZookeeper +import org.apache.kyuubi.zookeeper.ZookeeperConf.{ZK_CLIENT_PORT, ZK_CLIENT_PORT_ADDRESS} + +trait WithFlinkSQLEngineOnYarn extends KyuubiFunSuite with WithFlinkTestResources { + + protected def engineRefId: String + + protected val conf: KyuubiConf = new KyuubiConf(false) + + private var hdfsCluster: MiniDFSCluster = _ + + private var yarnCluster: MiniYARNCluster = _ + + private var zkServer: EmbeddedZookeeper = _ + + def withKyuubiConf: Map[String, String] = testExtraConf + + private val yarnConf: YarnConfiguration = { + val yarnConfig = new YarnConfiguration() + + // configurations copied from org.apache.flink.yarn.YarnTestBase + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 32) + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096) + + yarnConfig.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true) + yarnConfig.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2) + yarnConfig.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2) + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4) + yarnConfig.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600) + yarnConfig.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false) + // memory is overwritten in the MiniYARNCluster. + // so we have to change the number of cores for testing. 
+ yarnConfig.setInt(YarnConfiguration.NM_VCORES, 666) + yarnConfig.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 99.0f) + yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000) + yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 5000) + + // capacity-scheduler.xml is missing in hadoop-client-minicluster so this is a workaround + yarnConfig.set("yarn.scheduler.capacity.root.queues", "default,four_cores_queue") + + yarnConfig.setInt("yarn.scheduler.capacity.root.default.capacity", 100) + yarnConfig.setFloat("yarn.scheduler.capacity.root.default.user-limit-factor", 1) + yarnConfig.setInt("yarn.scheduler.capacity.root.default.maximum-capacity", 100) + yarnConfig.set("yarn.scheduler.capacity.root.default.state", "RUNNING") + yarnConfig.set("yarn.scheduler.capacity.root.default.acl_submit_applications", "*") + yarnConfig.set("yarn.scheduler.capacity.root.default.acl_administer_queue", "*") + + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-capacity", 100) + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-applications", 10) + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-allocation-vcores", 4) + yarnConfig.setFloat("yarn.scheduler.capacity.root.four_cores_queue.user-limit-factor", 1) + yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_submit_applications", "*") + yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_administer_queue", "*") + + yarnConfig.setInt("yarn.scheduler.capacity.node-locality-delay", -1) + // Set bind host to localhost to avoid java.net.BindException + yarnConfig.set(YarnConfiguration.RM_BIND_HOST, "localhost") + yarnConfig.set(YarnConfiguration.NM_BIND_HOST, "localhost") + + yarnConfig + } + + override def beforeAll(): Unit = { + zkServer = new EmbeddedZookeeper() + conf.set(ZK_CLIENT_PORT, 0).set(ZK_CLIENT_PORT_ADDRESS, "localhost") + zkServer.initialize(conf) + zkServer.start() + conf.set(HA_ADDRESSES, zkServer.getConnectString) + + hdfsCluster = new MiniDFSCluster.Builder(new Configuration) + .numDataNodes(1) + .checkDataNodeAddrConfig(true) + .checkDataNodeHostConfig(true) + .build() + + val hdfsServiceUrl = s"hdfs://localhost:${hdfsCluster.getNameNodePort}" + yarnConf.set("fs.defaultFS", hdfsServiceUrl) + yarnConf.addResource(hdfsCluster.getConfiguration(0)) + + val cp = System.getProperty("java.class.path") + // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory + // which can't be initialized on the client side + val hadoopJars = cp.split(":").filter(s => !s.contains("flink") && !s.contains("log4j")) + val hadoopClasspath = hadoopJars.mkString(":") + yarnConf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, hadoopClasspath) + + yarnCluster = new MiniYARNCluster("flink-engine-cluster", 1, 1, 1) + yarnCluster.init(yarnConf) + yarnCluster.start() + + val hadoopConfDir = Utils.createTempDir().toFile + val writer = new FileWriter(new File(hadoopConfDir, "core-site.xml")) + yarnCluster.getConfig.writeXml(writer) + writer.close() + + val envs = scala.collection.mutable.Map[String, String]() + val kyuubiExternals = Utils.getCodeSourceLocation(getClass) + .split("externals").head + val flinkHome = { + val candidates = Paths.get(kyuubiExternals, "externals", "kyuubi-download", "target") + .toFile.listFiles(f => f.getName.contains("flink")) + if (candidates == null) None else candidates.map(_.toPath).headOption + } + if (flinkHome.isDefined) { + envs("FLINK_HOME") = 
flinkHome.get.toString + envs("FLINK_CONF_DIR") = Paths.get(flinkHome.get.toString, "conf").toString + } + envs("HADOOP_CLASSPATH") = hadoopClasspath + envs("HADOOP_CONF_DIR") = hadoopConfDir.getAbsolutePath + + startFlinkEngine(envs.toMap) + + super.beforeAll() + } + + private def startFlinkEngine(envs: Map[String, String]): Unit = { + val processBuilder: ProcessBuilder = new ProcessBuilder + processBuilder.environment().putAll(envs.asJava) + + conf.set(ENGINE_FLINK_APPLICATION_JARS, udfJar.getAbsolutePath) + val flinkExtraJars = extraFlinkJars(envs("FLINK_HOME")) + val command = new ArrayBuffer[String]() + + command += s"${envs("FLINK_HOME")}${File.separator}bin/flink" + command += "run-application" + command += "-t" + command += "yarn-application" + command += s"-Dyarn.ship-files=${flinkExtraJars.mkString(";")}" + command += s"-Dyarn.application.name=kyuubi_user_flink_paul" + command += s"-Dyarn.tags=KYUUBI,$engineRefId" + command += "-Djobmanager.memory.process.size=1g" + command += "-Dtaskmanager.memory.process.size=1g" + command += "-Dcontainerized.master.env.FLINK_CONF_DIR=." + command += "-Dcontainerized.taskmanager.env.FLINK_CONF_DIR=." + command += s"-Dcontainerized.master.env.HADOOP_CONF_DIR=${envs("HADOOP_CONF_DIR")}" + command += s"-Dcontainerized.taskmanager.env.HADOOP_CONF_DIR=${envs("HADOOP_CONF_DIR")}" + command += "-Dexecution.target=yarn-application" + command += "-c" + command += "org.apache.kyuubi.engine.flink.FlinkSQLEngine" + command += s"${mainResource(envs).get}" + + for ((k, v) <- withKyuubiConf) { + conf.set(k, v) + } + + for ((k, v) <- conf.getAll) { + command += "--conf" + command += s"$k=$v" + } + + processBuilder.command(command.toList.asJava) + processBuilder.redirectOutput(Redirect.INHERIT) + processBuilder.redirectError(Redirect.INHERIT) + + info(s"starting flink yarn-application cluster for engine $engineRefId...") + val process = processBuilder.start() + process.waitFor() + info(s"flink yarn-application cluster for engine $engineRefId has started") + } + + def extraFlinkJars(flinkHome: String): Array[String] = { + // locate flink sql jars + val flinkExtraJars = new ListBuffer[String] + val flinkSQLJars = Paths.get(flinkHome) + .resolve("opt") + .toFile + .listFiles(new FilenameFilter { + override def accept(dir: File, name: String): Boolean = { + name.toLowerCase.startsWith("flink-sql-client") || + name.toLowerCase.startsWith("flink-sql-gateway") + } + }).map(f => f.getAbsolutePath).sorted + flinkExtraJars ++= flinkSQLJars + + val userJars = conf.get(ENGINE_FLINK_APPLICATION_JARS) + userJars.foreach(jars => flinkExtraJars ++= jars.split(",")) + flinkExtraJars.toArray + } + + /** + * Copied from org.apache.kyuubi.engine.ProcBuilder + * The engine jar or other runnable jar containing the main method + */ + def mainResource(env: Map[String, String]): Option[String] = { + // 1. get the main resource jar from the user-specified config first + val module = "kyuubi-flink-sql-engine" + val shortName = "flink" + val jarName = s"${module}_$SCALA_COMPILE_VERSION-$KYUUBI_VERSION.jar" + conf.getOption(s"kyuubi.session.engine.$shortName.main.resource").filter { userSpecified => + // skip the existence check if it's not a local file. + val uri = new URI(userSpecified) + val schema = if (uri.getScheme != null) uri.getScheme else "file" + schema match { + case "file" => Files.exists(Paths.get(userSpecified)) + case _ => true + } + }.orElse { + // 2.
get the main resource jar from system build default + env.get(KYUUBI_HOME).toSeq + .flatMap { p => + Seq( + Paths.get(p, "externals", "engines", shortName, jarName), + Paths.get(p, "externals", module, "target", jarName)) + } + .find(Files.exists(_)).map(_.toAbsolutePath.toFile.getCanonicalPath) + }.orElse { + // 3. get the main resource from dev environment + val cwd = Utils.getCodeSourceLocation(getClass).split("externals") + assert(cwd.length > 1) + Option(Paths.get(cwd.head, "externals", module, "target", jarName)) + .map(_.toAbsolutePath.toFile.getCanonicalPath) + } + } + + override def afterAll(): Unit = { + super.afterAll() + if (yarnCluster != null) { + yarnCluster.stop() + yarnCluster = null + } + if (hdfsCluster != null) { + hdfsCluster.shutdown() + hdfsCluster = null + } + if (zkServer != null) { + zkServer.stop() + zkServer = null + } + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala new file mode 100644 index 00000000000..3b1d65cb233 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink + +import java.io.File + +import org.apache.kyuubi.Utils +import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar + +trait WithFlinkTestResources { + + protected val GENERATED_UDF_CLASS: String = "LowerUDF" + + protected val GENERATED_UDF_CODE: String = + s""" + public class $GENERATED_UDF_CLASS extends org.apache.flink.table.functions.ScalarFunction { + public String eval(String str) { + return str.toLowerCase(); + } + } + """ + + protected val udfJar: File = TestUserClassLoaderJar.createJarFile( + Utils.createTempDir("test-jar").toFile, + "test-classloader-udf.jar", + GENERATED_UDF_CLASS, + GENERATED_UDF_CODE) + + protected val savepointDir: File = Utils.createTempDir("savepoints").toFile + + protected val testExtraConf: Map[String, String] = Map( + "flink.pipeline.name" -> "test-job", + "flink.state.savepoints.dir" -> savepointDir.toURI.toString) +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala new file mode 100644 index 00000000000..279cbea22a4 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.flink.operation + +import java.util.UUID + +import org.apache.kyuubi.{KYUUBI_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY +import org.apache.kyuubi.engine.ShareLevel +import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineLocal} +import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE} +import org.apache.kyuubi.operation.NoneMode + +class FlinkOperationLocalSuite extends FlinkOperationSuite + with WithDiscoveryFlinkSQLEngine with WithFlinkSQLEngineLocal { + + protected def jdbcUrl: String = getFlinkEngineServiceUrl + + override def withKyuubiConf: Map[String, String] = { + Map( + "flink.execution.target" -> "remote", + "flink.high-availability.cluster-id" -> "flink-mini-cluster", + "flink.app.name" -> "kyuubi_connection_flink_paul", + HA_NAMESPACE.key -> namespace, + HA_ENGINE_REF_ID.key -> engineRefId, + ENGINE_TYPE.key -> "FLINK_SQL", + ENGINE_SHARE_LEVEL.key -> shareLevel, + OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name, + KYUUBI_SESSION_USER_KEY -> "paullin") ++ testExtraConf + } + + override protected def engineRefId: String = UUID.randomUUID().toString + + def namespace: String = "/kyuubi/flink-local-engine-test" + + def shareLevel: String = ShareLevel.USER.toString + + def engineType: String = "flink" + + test("execute statement - kyuubi defined functions") { + withJdbcStatement() { statement => + var resultSet = statement.executeQuery("select kyuubi_version() as kyuubi_version") + assert(resultSet.next()) + assert(resultSet.getString(1) === KYUUBI_VERSION) + + resultSet = statement.executeQuery("select kyuubi_engine_name() as engine_name") + assert(resultSet.next()) + assert(resultSet.getString(1).equals(s"kyuubi_connection_flink_paul")) + + resultSet = statement.executeQuery("select kyuubi_engine_id() as engine_id") + assert(resultSet.next()) + assert(resultSet.getString(1) === "flink-mini-cluster") + + resultSet = statement.executeQuery("select kyuubi_system_user() as `system_user`") + assert(resultSet.next()) + assert(resultSet.getString(1) === Utils.currentUser) + + resultSet = statement.executeQuery("select kyuubi_session_user() as `session_user`") + assert(resultSet.next()) + assert(resultSet.getString(1) === "paullin") + } + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala new file mode 100644 index 00000000000..401c3b0bdd0 --- /dev/null +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.flink.operation + +import java.util.UUID + +import org.apache.kyuubi.{KYUUBI_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SHARE_LEVEL, ENGINE_TYPE} +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY +import org.apache.kyuubi.engine.ShareLevel +import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineOnYarn} +import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE} + +class FlinkOperationOnYarnSuite extends FlinkOperationSuite + with WithDiscoveryFlinkSQLEngine with WithFlinkSQLEngineOnYarn { + + protected def jdbcUrl: String = getFlinkEngineServiceUrl + + override def withKyuubiConf: Map[String, String] = { + Map( + HA_NAMESPACE.key -> namespace, + HA_ENGINE_REF_ID.key -> engineRefId, + ENGINE_TYPE.key -> "FLINK_SQL", + ENGINE_SHARE_LEVEL.key -> shareLevel, + KYUUBI_SESSION_USER_KEY -> "paullin") ++ testExtraConf + } + + override protected def engineRefId: String = UUID.randomUUID().toString + + def namespace: String = "/kyuubi/flink-yarn-application-test" + + def shareLevel: String = ShareLevel.USER.toString + + def engineType: String = "flink" + + test("execute statement - kyuubi defined functions") { + withJdbcStatement() { statement => + var resultSet = statement.executeQuery("select kyuubi_version() as kyuubi_version") + assert(resultSet.next()) + assert(resultSet.getString(1) === KYUUBI_VERSION) + + resultSet = statement.executeQuery("select kyuubi_engine_name() as engine_name") + assert(resultSet.next()) + assert(resultSet.getString(1).equals(s"kyuubi_user_flink_paul")) + + resultSet = statement.executeQuery("select kyuubi_engine_id() as engine_id") + assert(resultSet.next()) + assert(resultSet.getString(1).startsWith("application_")) + + resultSet = statement.executeQuery("select kyuubi_system_user() as `system_user`") + assert(resultSet.next()) + assert(resultSet.getString(1) === Utils.currentUser) + + resultSet = statement.executeQuery("select kyuubi_session_user() as `session_user`") + assert(resultSet.next()) + assert(resultSet.getString(1) === "paullin") + } + } +} diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala index d0522d3eaaa..8e7c35a95a4 100644 --- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala @@ -17,43 +17,29 @@ package org.apache.kyuubi.engine.flink.operation +import java.nio.file.Paths import java.sql.DatabaseMetaData import java.util.UUID import scala.collection.JavaConverters._ import org.apache.flink.api.common.JobID +import org.apache.flink.configuration.PipelineOptions import org.apache.flink.table.types.logical.LogicalTypeRoot import org.apache.hive.service.rpc.thrift._ -import org.scalatest.concurrent.PatienceConfiguration.Timeout -import org.scalatest.time.SpanSugar._ import org.apache.kyuubi.Utils import org.apache.kyuubi.config.KyuubiConf._ -import org.apache.kyuubi.engine.flink.FlinkEngineUtils._ -import org.apache.kyuubi.engine.flink.WithFlinkSQLEngine +import 
org.apache.kyuubi.engine.flink.FlinkEngineUtils.FLINK_RUNTIME_VERSION +import org.apache.kyuubi.engine.flink.WithFlinkTestResources import org.apache.kyuubi.engine.flink.result.Constants import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar -import org.apache.kyuubi.jdbc.hive.KyuubiStatement -import org.apache.kyuubi.operation.{HiveJDBCTestHelper, NoneMode} +import org.apache.kyuubi.jdbc.hive.{KyuubiSQLException, KyuubiStatement} +import org.apache.kyuubi.jdbc.hive.common.TimestampTZ +import org.apache.kyuubi.operation.HiveJDBCTestHelper import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ -import org.apache.kyuubi.service.ServiceState._ -class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { - override def withKyuubiConf: Map[String, String] = - Map(OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name) - - override protected def jdbcUrl: String = - s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;" - - ignore("release session if shared level is CONNECTION") { - logger.info(s"jdbc url is $jdbcUrl") - assert(engine.getServiceState == STARTED) - withJdbcStatement() { _ => } - eventually(Timeout(20.seconds)) { - assert(engine.getServiceState == STOPPED) - } - } +abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTestResources { test("get catalogs") { withJdbcStatement() { statement => @@ -649,6 +635,60 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { } } + test("execute statement - show/stop jobs") { + if (FLINK_RUNTIME_VERSION >= "1.17") { + withSessionConf()(Map(ENGINE_FLINK_MAX_ROWS.key -> "10"))(Map.empty) { + withMultipleConnectionJdbcStatement()({ statement => + statement.executeQuery( + "create table tbl_a (a int) with (" + + "'connector' = 'datagen', " + + "'rows-per-second'='10')") + statement.executeQuery("create table tbl_b (a int) with ('connector' = 'blackhole')") + val insertResult1 = statement.executeQuery("insert into tbl_b select * from tbl_a") + assert(insertResult1.next()) + val jobId1 = insertResult1.getString(1) + + Thread.sleep(5000) + + val showResult = statement.executeQuery("show jobs") + val metadata = showResult.getMetaData + assert(metadata.getColumnName(1) === "job id") + assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metadata.getColumnName(2) === "job name") + assert(metadata.getColumnType(2) === java.sql.Types.VARCHAR) + assert(metadata.getColumnName(3) === "status") + assert(metadata.getColumnType(3) === java.sql.Types.VARCHAR) + assert(metadata.getColumnName(4) === "start time") + assert(metadata.getColumnType(4) === java.sql.Types.OTHER) + + var isFound = false + while (showResult.next()) { + if (showResult.getString(1) === jobId1) { + isFound = true + assert(showResult.getString(2) === "test-job") + assert(showResult.getString(3) === "RUNNING") + assert(showResult.getObject(4).isInstanceOf[TimestampTZ]) + } + } + assert(isFound) + + val stopResult1 = statement.executeQuery(s"stop job '$jobId1'") + assert(stopResult1.next()) + assert(stopResult1.getString(1) === "OK") + + val insertResult2 = statement.executeQuery("insert into tbl_b select * from tbl_a") + assert(insertResult2.next()) + val jobId2 = insertResult2.getString(1) + + val stopResult2 = statement.executeQuery(s"stop job '$jobId2' with savepoint") + assert(stopResult2.getMetaData.getColumnName(1).equals("savepoint path")) + assert(stopResult2.next()) + assert(Paths.get(stopResult2.getString(1)).getFileName.toString.startsWith("savepoint-")) + }) + } + } + } + 
test("execute statement - select column name with dots") { withJdbcStatement() { statement => val resultSet = statement.executeQuery("select 'tmp.hello'") @@ -756,30 +796,54 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { } } - test("execute statement - select array") { + test("execute statement - select timestamp with local time zone") { + withJdbcStatement() { statement => + statement.executeQuery("CREATE VIEW T1 AS SELECT TO_TIMESTAMP_LTZ(4001, 3)") + statement.executeQuery("SET 'table.local-time-zone' = 'UTC'") + val resultSetUTC = statement.executeQuery("SELECT * FROM T1") + val metaData = resultSetUTC.getMetaData + assert(metaData.getColumnType(1) === java.sql.Types.OTHER) + assert(resultSetUTC.next()) + assert(resultSetUTC.getString(1) === "1970-01-01 00:00:04.001 UTC") + + statement.executeQuery("SET 'table.local-time-zone' = 'America/Los_Angeles'") + val resultSetPST = statement.executeQuery("SELECT * FROM T1") + assert(resultSetPST.next()) + assert(resultSetPST.getString(1) === "1969-12-31 16:00:04.001 America/Los_Angeles") + } + } + + test("execute statement - select time") { withJdbcStatement() { statement => val resultSet = - statement.executeQuery("select array ['v1', 'v2', 'v3']") + statement.executeQuery( + "select time '00:00:03', time '00:00:05.123456789'") + val metaData = resultSet.getMetaData + assert(metaData.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metaData.getColumnType(2) === java.sql.Types.VARCHAR) + assert(resultSet.next()) + assert(resultSet.getString(1) == "00:00:03") + assert(resultSet.getString(2) == "00:00:05.123") + } + } + + test("execute statement - select array") { + withJdbcStatement() { statement => + val resultSet = statement.executeQuery("select array ['v1', 'v2', 'v3']") val metaData = resultSet.getMetaData assert(metaData.getColumnType(1) === java.sql.Types.ARRAY) assert(resultSet.next()) - if (isFlinkVersionEqualTo("1.14")) { - val expected = """["v1","v2","v3"]""" - assert(resultSet.getObject(1).toString == expected) - } - if (isFlinkVersionAtLeast("1.15")) { - val expected = "[v1,v2,v3]" - assert(resultSet.getObject(1).toString == expected) - } + val expected = "[\"v1\",\"v2\",\"v3\"]" + assert(resultSet.getObject(1).toString == expected) } } test("execute statement - select map") { withJdbcStatement() { statement => - val resultSet = - statement.executeQuery("select map ['k1', 'v1', 'k2', 'v2']") + val resultSet = statement.executeQuery("select map ['k1', 'v1', 'k2', 'v2']") assert(resultSet.next()) - assert(resultSet.getString(1) == "{k1=v1, k2=v2}") + assert(List("{k1=v1, k2=v2}", "{k2=v2, k1=v1}") + .contains(resultSet.getString(1))) val metaData = resultSet.getMetaData assert(metaData.getColumnType(1) === java.sql.Types.JAVA_OBJECT) } @@ -787,17 +851,10 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { test("execute statement - select row") { withJdbcStatement() { statement => - val resultSet = - statement.executeQuery("select (1, '2', true)") + val resultSet = statement.executeQuery("select (1, '2', true)") assert(resultSet.next()) - if (isFlinkVersionEqualTo("1.14")) { - val expected = """{INT NOT NULL:1,CHAR(1) NOT NULL:"2",BOOLEAN NOT NULL:true}""" - assert(resultSet.getString(1) == expected) - } - if (isFlinkVersionAtLeast("1.15")) { - val expected = """{INT NOT NULL:1,CHAR(1) NOT NULL:2,BOOLEAN NOT NULL:true}""" - assert(resultSet.getString(1) == expected) - } + val expected = """{INT NOT NULL:1,CHAR(1) NOT NULL:"2",BOOLEAN NOT NULL:true}""" + 
assert(resultSet.getString(1) == expected) val metaData = resultSet.getMetaData assert(metaData.getColumnType(1) === java.sql.Types.STRUCT) } @@ -807,25 +864,30 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { withJdbcStatement() { statement => val resultSet = statement.executeQuery("select encode('kyuubi', 'UTF-8')") assert(resultSet.next()) - if (isFlinkVersionEqualTo("1.14")) { - assert(resultSet.getString(1) == "kyuubi") - } - if (isFlinkVersionAtLeast("1.15")) { - // TODO: validate table results after FLINK-28882 is resolved - assert(resultSet.getString(1) == "k") - } + // TODO: validate table results after FLINK-28882 is resolved + assert(resultSet.getString(1) == "k") + val metaData = resultSet.getMetaData + assert(metaData.getColumnType(1) === java.sql.Types.BINARY) + } + } + + test("execute statement - select varbinary") { + withJdbcStatement() { statement => + val resultSet = statement.executeQuery("select cast('kyuubi' as varbinary)") + assert(resultSet.next()) + assert(resultSet.getString(1) == "kyuubi") val metaData = resultSet.getMetaData assert(metaData.getColumnType(1) === java.sql.Types.BINARY) } } test("execute statement - select float") { - withJdbcStatement()({ statement => + withJdbcStatement() { statement => val resultSet = statement.executeQuery("SELECT cast(0.1 as float)") assert(resultSet.next()) assert(resultSet.getString(1) == "0.1") assert(resultSet.getFloat(1) == 0.1f) - }) + } } test("execute statement - select count") { @@ -876,20 +938,15 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { } test("execute statement - create/drop catalog") { - withJdbcStatement()({ statement => - val createResult = { + withJdbcStatement() { statement => + val createResult = statement.executeQuery("create catalog cat_a with ('type'='generic_in_memory')") - } - if (isFlinkVersionAtLeast("1.15")) { - assert(createResult.next()) - assert(createResult.getString(1) === "OK") - } + assert(createResult.next()) + assert(createResult.getString(1) === "OK") val dropResult = statement.executeQuery("drop catalog cat_a") - if (isFlinkVersionAtLeast("1.15")) { - assert(dropResult.next()) - assert(dropResult.getString(1) === "OK") - } - }) + assert(dropResult.next()) + assert(dropResult.getString(1) === "OK") + } } test("execute statement - set/get catalog") { @@ -910,30 +967,24 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { } test("execute statement - create/alter/drop database") { - withJdbcStatement()({ statement => + withJdbcStatement() { statement => val createResult = statement.executeQuery("create database db_a") - if (isFlinkVersionAtLeast("1.15")) { - assert(createResult.next()) - assert(createResult.getString(1) === "OK") - } + assert(createResult.next()) + assert(createResult.getString(1) === "OK") val alterResult = statement.executeQuery("alter database db_a set ('k1' = 'v1')") - if (isFlinkVersionAtLeast("1.15")) { - assert(alterResult.next()) - assert(alterResult.getString(1) === "OK") - } + assert(alterResult.next()) + assert(alterResult.getString(1) === "OK") val dropResult = statement.executeQuery("drop database db_a") - if (isFlinkVersionAtLeast("1.15")) { - assert(dropResult.next()) - assert(dropResult.getString(1) === "OK") - } - }) + assert(dropResult.next()) + assert(dropResult.getString(1) === "OK") + } } test("execute statement - set/get database") { withSessionConf()( Map(ENGINE_OPERATION_CONVERT_CATALOG_DATABASE_ENABLED.key -> "true"))( Map.empty) { - 
withJdbcStatement()({ statement => + withJdbcStatement() { statement => statement.executeQuery("create database db_a") val schema = statement.getConnection.getSchema assert(schema == "default_database") @@ -941,102 +992,113 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { val changedSchema = statement.getConnection.getSchema assert(changedSchema == "db_a") assert(statement.execute("drop database db_a")) - }) + } } } test("execute statement - create/alter/drop table") { - withJdbcStatement()({ statement => - val createResult = { + withJdbcStatement() { statement => + val createResult = statement.executeQuery("create table tbl_a (a string) with ('connector' = 'blackhole')") - } - if (isFlinkVersionAtLeast("1.15")) { - assert(createResult.next()) - assert(createResult.getString(1) === "OK") - } + assert(createResult.next()) + assert(createResult.getString(1) === "OK") val alterResult = statement.executeQuery("alter table tbl_a rename to tbl_b") - if (isFlinkVersionAtLeast("1.15")) { - assert(alterResult.next()) - assert(alterResult.getString(1) === "OK") - } + assert(alterResult.next()) + assert(alterResult.getString(1) === "OK") val dropResult = statement.executeQuery("drop table tbl_b") - if (isFlinkVersionAtLeast("1.15")) { - assert(dropResult.next()) - assert(dropResult.getString(1) === "OK") - } - }) + assert(dropResult.next()) + assert(dropResult.getString(1) === "OK") + } } test("execute statement - create/alter/drop view") { - withMultipleConnectionJdbcStatement()({ statement => + withMultipleConnectionJdbcStatement() { statement => val createResult = statement.executeQuery("create view view_a as select 1") - if (isFlinkVersionAtLeast("1.15")) { - assert(createResult.next()) - assert(createResult.getString(1) === "OK") - } + assert(createResult.next()) + assert(createResult.getString(1) === "OK") val alterResult = statement.executeQuery("alter view view_a rename to view_b") - if (isFlinkVersionAtLeast("1.15")) { - assert(alterResult.next()) - assert(alterResult.getString(1) === "OK") - } + assert(alterResult.next()) + assert(alterResult.getString(1) === "OK") val dropResult = statement.executeQuery("drop view view_b") - if (isFlinkVersionAtLeast("1.15")) { - assert(dropResult.next()) - assert(dropResult.getString(1) === "OK") - } - }) + assert(dropResult.next()) + assert(dropResult.getString(1) === "OK") + } } - test("execute statement - insert into") { - withMultipleConnectionJdbcStatement()({ statement => + test("execute statement - batch insert into") { + withMultipleConnectionJdbcStatement() { statement => statement.executeQuery("create table tbl_a (a int) with ('connector' = 'blackhole')") val resultSet = statement.executeQuery("insert into tbl_a select 1") val metadata = resultSet.getMetaData - assert(metadata.getColumnName(1) == "default_catalog.default_database.tbl_a") - assert(metadata.getColumnType(1) == java.sql.Types.BIGINT) + assert(metadata.getColumnName(1) === "job id") + assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR) + assert(resultSet.next()) + assert(resultSet.getString(1).length == 32) + } + } + + test("execute statement - streaming insert into") { + withMultipleConnectionJdbcStatement()({ statement => + // Flink currently doesn't support stop job statement, thus use a finite stream + statement.executeQuery( + "create table tbl_a (a int) with (" + + "'connector' = 'datagen', " + + "'rows-per-second'='10', " + + "'number-of-rows'='100')") + statement.executeQuery("create table tbl_b (a int) with ('connector' = 
'blackhole')") + val resultSet = statement.executeQuery("insert into tbl_b select * from tbl_a") + val metadata = resultSet.getMetaData + assert(metadata.getColumnName(1) === "job id") + assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR) assert(resultSet.next()) - assert(resultSet.getLong(1) == -1L) + val jobId = resultSet.getString(1) + assert(jobId.length == 32) + + if (FLINK_RUNTIME_VERSION >= "1.17") { + val stopResult = statement.executeQuery(s"stop job '$jobId'") + assert(stopResult.next()) + assert(stopResult.getString(1) === "OK") + } }) } test("execute statement - set properties") { - withMultipleConnectionJdbcStatement()({ statement => + withMultipleConnectionJdbcStatement() { statement => val resultSet = statement.executeQuery("set table.dynamic-table-options.enabled = true") val metadata = resultSet.getMetaData - assert(metadata.getColumnName(1) == "key") - assert(metadata.getColumnName(2) == "value") + assert(metadata.getColumnName(1) == "result") assert(resultSet.next()) - assert(resultSet.getString(1) == "table.dynamic-table-options.enabled") - assert(resultSet.getString(2) == "true") - }) + assert(resultSet.getString(1) == "OK") + } } test("execute statement - show properties") { - withMultipleConnectionJdbcStatement()({ statement => + withMultipleConnectionJdbcStatement() { statement => val resultSet = statement.executeQuery("set") val metadata = resultSet.getMetaData assert(metadata.getColumnName(1) == "key") assert(metadata.getColumnName(2) == "value") assert(resultSet.next()) - }) + } } test("execute statement - reset property") { - withMultipleConnectionJdbcStatement()({ statement => - statement.executeQuery("set pipeline.jars = my.jar") - statement.executeQuery("reset pipeline.jars") + val originalName = "test-job" // defined in WithFlinkTestResource + withMultipleConnectionJdbcStatement() { statement => + statement.executeQuery(s"set ${PipelineOptions.NAME.key()} = wrong-name") + statement.executeQuery(s"reset ${PipelineOptions.NAME.key()}") val resultSet = statement.executeQuery("set") // Flink does not support set key without value currently, // thus read all rows to find the desired one var success = false while (resultSet.next()) { - if (resultSet.getString(1) == "pipeline.jars" && - !resultSet.getString(2).contains("my.jar")) { + if (resultSet.getString(1) == PipelineOptions.NAME.key() && + resultSet.getString(2).equals(originalName)) { success = true } } assert(success) - }) + } } test("execute statement - select udf") { @@ -1074,7 +1136,8 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { test("ensure result max rows") { withSessionConf()(Map(ENGINE_FLINK_MAX_ROWS.key -> "200"))(Map.empty) { withJdbcStatement() { statement => - statement.execute("create table tbl_src (a bigint) with ('connector' = 'datagen')") + statement.execute("create table tbl_src (a bigint) with (" + + "'connector' = 'datagen', 'number-of-rows' = '1000')") val resultSet = statement.executeQuery(s"select a from tbl_src") var rows = 0 while (resultSet.next()) { @@ -1085,7 +1148,31 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { } } - test("execute statement - add/remove/show jar") { + test("execute statement - add/show jar") { + val jarName = s"newly-added-${UUID.randomUUID()}.jar" + val newJar = TestUserClassLoaderJar.createJarFile( + Utils.createTempDir("add-jar-test").toFile, + jarName, + GENERATED_UDF_CLASS, + GENERATED_UDF_CODE).toPath + + withMultipleConnectionJdbcStatement()({ statement => + 
statement.execute(s"add jar '$newJar'") + + val showJarsResultAdded = statement.executeQuery("show jars") + var exists = false + while (showJarsResultAdded.next()) { + if (showJarsResultAdded.getString(1).contains(jarName)) { + exists = true + } + } + assert(exists) + }) + } + + // ignored because Flink gateway doesn't support remove-jar statements + // see org.apache.flink.table.gateway.service.operation.OperationExecutor#callRemoveJar(..) + ignore("execute statement - remove jar") { val jarName = s"newly-added-${UUID.randomUUID()}.jar" val newJar = TestUserClassLoaderJar.createJarFile( Utils.createTempDir("add-jar-test").toFile, @@ -1155,9 +1242,25 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { assert(stmt.asInstanceOf[KyuubiStatement].getQueryId === null) stmt.executeQuery("insert into tbl_a values (1)") val queryId = stmt.asInstanceOf[KyuubiStatement].getQueryId - assert(queryId !== null) - // parse the string to check if it's valid Flink job id - assert(JobID.fromHexString(queryId) !== null) + // Flink 1.16 doesn't support query id via ResultFetcher + if (FLINK_RUNTIME_VERSION >= "1.17") { + assert(queryId !== null) + // parse the string to check if it's valid Flink job id + assert(JobID.fromHexString(queryId) !== null) + } } } + + test("test result fetch timeout") { + val exception = intercept[KyuubiSQLException]( + withSessionConf()(Map(ENGINE_FLINK_FETCH_TIMEOUT.key -> "60000"))() { + withJdbcStatement("tbl_a") { stmt => + stmt.executeQuery("create table tbl_a (a int) " + + "with ('connector' = 'datagen', 'rows-per-second'='0')") + val resultSet = stmt.executeQuery("select * from tbl_a") + while (resultSet.next()) {} + } + }) + assert(exception.getMessage === "Futures timed out after [60000 milliseconds]") + } } diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala index 1194f3582b1..17c49464fae 100644 --- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala @@ -18,21 +18,33 @@ package org.apache.kyuubi.engine.flink.operation import java.sql.Statement +import java.util.UUID import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.flink.WithFlinkSQLEngine +import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineLocal} +import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE} import org.apache.kyuubi.operation.{AnalyzeMode, ExecutionMode, HiveJDBCTestHelper, ParseMode, PhysicalMode} -class PlanOnlyOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper { +class PlanOnlyOperationSuite extends WithFlinkSQLEngineLocal + with HiveJDBCTestHelper with WithDiscoveryFlinkSQLEngine { + + override protected def engineRefId: String = UUID.randomUUID().toString + + override protected def namespace: String = "/kyuubi/flink-plan-only-test" + + def engineType: String = "flink" override def withKyuubiConf: Map[String, String] = Map( + "flink.execution.target" -> "remote", + HA_NAMESPACE.key -> namespace, + HA_ENGINE_REF_ID.key -> engineRefId, + KyuubiConf.ENGINE_TYPE.key -> "FLINK_SQL", KyuubiConf.ENGINE_SHARE_LEVEL.key -> "user", KyuubiConf.OPERATION_PLAN_ONLY_MODE.key -> ParseMode.name, - 
KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key -> "plan-only") + KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key -> "plan-only") ++ testExtraConf - override protected def jdbcUrl: String = - s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;" + override protected def jdbcUrl: String = getFlinkEngineServiceUrl test("Plan only operation with system defaults") { withJdbcStatement() { statement => diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/result/ResultSetSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/result/ResultSetSuite.scala index 9190456b32b..9ee5c658bc9 100644 --- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/result/ResultSetSuite.scala +++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/result/ResultSetSuite.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.engine.flink.result +import java.time.ZoneId + import org.apache.flink.table.api.{DataTypes, ResultKind} import org.apache.flink.table.catalog.Column import org.apache.flink.table.data.StringData @@ -44,9 +46,10 @@ class ResultSetSuite extends KyuubiFunSuite { .data(rowsNew) .build - assert(RowSet.toRowBaseSet(rowsNew, resultSetNew) - === RowSet.toRowBaseSet(rowsOld, resultSetOld)) - assert(RowSet.toColumnBasedSet(rowsNew, resultSetNew) - === RowSet.toColumnBasedSet(rowsOld, resultSetOld)) + val timeZone = ZoneId.of("America/Los_Angeles") + assert(RowSet.toRowBaseSet(rowsNew, resultSetNew, timeZone) + === RowSet.toRowBaseSet(rowsOld, resultSetOld, timeZone)) + assert(RowSet.toColumnBasedSet(rowsNew, resultSetNew, timeZone) + === RowSet.toColumnBasedSet(rowsOld, resultSetOld, timeZone)) } } diff --git a/externals/kyuubi-hive-sql-engine/pom.xml b/externals/kyuubi-hive-sql-engine/pom.xml index 0319d3dd2f3..caed7e27c37 100644 --- a/externals/kyuubi-hive-sql-engine/pom.xml +++ b/externals/kyuubi-hive-sql-engine/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-hive-sql-engine_2.12 + kyuubi-hive-sql-engine_${scala.binary.version} jar Kyuubi Project Engine Hive SQL https://kyuubi.apache.org/ @@ -163,6 +163,12 @@ HikariCP test + + + com.vladsch.flexmark + flexmark-all + test + @@ -179,12 +185,7 @@ com.fasterxml.jackson.core:jackson-core com.fasterxml.jackson.core:jackson-databind com.fasterxml.jackson.module:jackson-module-scala_${scala.binary.version} - org.apache.kyuubi:kyuubi-common_${scala.binary.version} - org.apache.kyuubi:kyuubi-events_${scala.binary.version} - org.apache.kyuubi:kyuubi-ha_${scala.binary.version} - org.apache.curator:curator-client - org.apache.curator:curator-framework - org.apache.curator:curator-recipes + org.apache.kyuubi:* @@ -205,15 +206,6 @@ - - - org.apache.curator - ${kyuubi.shade.packageName}.org.apache.curator - - org.apache.curator.** - - - diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/HiveSQLEngine.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/HiveSQLEngine.scala index 839da710e3e..3cc426c435a 100644 --- a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/HiveSQLEngine.scala +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/HiveSQLEngine.scala @@ -18,6 +18,7 @@ package org.apache.kyuubi.engine.hive import java.security.PrivilegedExceptionAction +import java.time.Instant import scala.util.control.NonFatal @@ -65,6 +66,7 @@ 
object HiveSQLEngine extends Logging { var currentEngine: Option[HiveSQLEngine] = None val hiveConf = new HiveConf() val kyuubiConf = new KyuubiConf() + val user = UserGroupInformation.getCurrentUser.getShortUserName def startEngine(): Unit = { try { @@ -97,6 +99,8 @@ object HiveSQLEngine extends Logging { } val engine = new HiveSQLEngine() + val appName = s"kyuubi_${user}_hive_${Instant.now}" + hiveConf.setIfUnset("hive.engine.name", appName) info(s"Starting ${engine.getName}") engine.initialize(kyuubiConf) EventBus.post(HiveEngineEvent(engine)) diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperation.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperation.scala index 81affdff3a3..9759fa00be4 100644 --- a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperation.scala +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperation.scala @@ -21,9 +21,10 @@ import java.util.concurrent.Future import org.apache.hive.service.cli.operation.{Operation, OperationManager} import org.apache.hive.service.cli.session.{HiveSession, SessionManager => HiveSessionManager} -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp} import org.apache.kyuubi.KyuubiSQLException +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY import org.apache.kyuubi.engine.hive.session.HiveSessionImpl import org.apache.kyuubi.operation.{AbstractOperation, FetchOrientation, OperationState, OperationStatus} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation @@ -43,12 +44,14 @@ abstract class HiveOperation(session: Session) extends AbstractOperation(session override def beforeRun(): Unit = { setState(OperationState.RUNNING) + hive.getHiveConf.set(KYUUBI_SESSION_USER_KEY, session.user) } override def afterRun(): Unit = { - state.synchronized { + withLockRequired { if (!isTerminalState(state)) { setState(OperationState.FINISHED) + hive.getHiveConf.unset(KYUUBI_SESSION_USER_KEY) } } } @@ -92,22 +95,31 @@ abstract class HiveOperation(session: Session) extends AbstractOperation(session resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { val tOrder = FetchOrientation.toTFetchOrientation(order) val hiveOrder = org.apache.hive.service.cli.FetchOrientation.getFetchOrientation(tOrder) val rowSet = internalHiveOperation.getNextRowSet(hiveOrder, rowSetSize) - rowSet.toTRowSet + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(rowSet.toTRowSet) + resp.setHasMoreRows(false) + resp } - def getOperationLogRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + def getOperationLogRowSet(order: FetchOrientation, rowSetSize: Int): TFetchResultsResp = { val tOrder = FetchOrientation.toTFetchOrientation(order) val hiveOrder = org.apache.hive.service.cli.FetchOrientation.getFetchOrientation(tOrder) val handle = internalHiveOperation.getHandle - delegatedOperationManager.getOperationLogRowSet( + val rowSet = delegatedOperationManager.getOperationLogRowSet( handle, hiveOrder, rowSetSize, hive.getHiveConf).toTRowSet + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(rowSet) + resp.setHasMoreRows(false) + resp } override def 
isTimedOut: Boolean = internalHiveOperation.isTimedOut(System.currentTimeMillis) diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationManager.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationManager.scala index 0762a2938e0..4e41e742e0b 100644 --- a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationManager.scala +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationManager.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.engine.hive.operation import java.util.List import org.apache.hadoop.hive.conf.HiveConf.ConfVars -import org.apache.hive.service.rpc.thrift.TRowSet +import org.apache.hive.service.rpc.thrift.TFetchResultsResp import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.engine.hive.session.HiveSessionImpl @@ -154,7 +154,7 @@ class HiveOperationManager() extends OperationManager("HiveOperationManager") { override def getOperationLogRowSet( opHandle: OperationHandle, order: FetchOrientation, - maxRows: Int): TRowSet = { + maxRows: Int): TFetchResultsResp = { val operation = getOperation(opHandle).asInstanceOf[HiveOperation] operation.getOperationLogRowSet(order, maxRows) } diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/session/HiveSessionImpl.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/session/HiveSessionImpl.scala index 3b85f94dfb9..5069b13798c 100644 --- a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/session/HiveSessionImpl.scala +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/session/HiveSessionImpl.scala @@ -27,6 +27,7 @@ import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtoco import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.engine.hive.events.HiveSessionEvent +import org.apache.kyuubi.engine.hive.udf.KDFRegistry import org.apache.kyuubi.events.EventBus import org.apache.kyuubi.operation.{Operation, OperationHandle} import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} @@ -48,6 +49,7 @@ class HiveSessionImpl( val confClone = new HashMap[String, String]() confClone.putAll(conf.asJava) // pass conf.asScala not support `put` method hive.open(confClone) + KDFRegistry.registerAll() EventBus.post(sessionEvent) } diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KDFRegistry.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KDFRegistry.scala new file mode 100644 index 00000000000..5ff468b7782 --- /dev/null +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KDFRegistry.scala @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
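Several operations above stop returning a bare TRowSet and instead hand back a TFetchResultsResp carrying the rows plus a hasMoreRows flag. The sketch below only isolates that wrapping pattern from the HiveOperation hunk for readability; wrapRowSet and okStatus are illustrative names, with okStatus standing in for the OK_STATUS constant the operation already uses.

import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TRowSet, TStatus}

// Minimal sketch of the TRowSet -> TFetchResultsResp wrapping used by
// getNextRowSetInternal and getOperationLogRowSet above.
def wrapRowSet(rowSet: TRowSet, okStatus: TStatus): TFetchResultsResp = {
  val resp = new TFetchResultsResp(okStatus) // same constructor call as in the diff
  resp.setResults(rowSet)                    // the fetched rows
  resp.setHasMoreRows(false)                 // these engines always report no more rows
  resp
}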
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.hive.udf + +import scala.collection.mutable.ArrayBuffer + +import org.apache.hadoop.hive.ql.exec.{FunctionRegistry, UDFArgumentLengthException} +import org.apache.hadoop.hive.ql.session.SessionState +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector +import org.apache.hadoop.hive.serde2.objectinspector.primitive.{PrimitiveObjectInspectorFactory, StringObjectInspector} + +import org.apache.kyuubi.{KYUUBI_VERSION, Utils} +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_ID, KYUUBI_SESSION_USER_KEY} + +object KDFRegistry { + + @transient + val registeredFunctions = new ArrayBuffer[KyuubiDefinedFunction]() + + val kyuubi_version: KyuubiDefinedFunction = create( + "kyuubi_version", + new KyuubiVersionFunction, + "Return the version of Kyuubi Server", + "string", + "1.8.0") + + val engine_name: KyuubiDefinedFunction = create( + "engine_name", + new EngineNameFunction, + "Return the name of engine", + "string", + "1.8.0") + + val engine_id: KyuubiDefinedFunction = create( + "engine_id", + new EngineIdFunction, + "Return the id of engine", + "string", + "1.8.0") + + val system_user: KyuubiDefinedFunction = create( + "system_user", + new SystemUserFunction, + "Return the system user", + "string", + "1.8.0") + + val session_user: KyuubiDefinedFunction = create( + "session_user", + new SessionUserFunction, + "Return the session user", + "string", + "1.8.0") + + def create( + name: String, + udf: GenericUDF, + description: String, + returnType: String, + since: String): KyuubiDefinedFunction = { + val kdf = KyuubiDefinedFunction(name, udf, description, returnType, since) + registeredFunctions += kdf + kdf + } + + def registerAll(): Unit = { + for (func <- registeredFunctions) { + FunctionRegistry.registerTemporaryUDF(func.name, func.udf.getClass) + } + } +} + +class KyuubiVersionFunction() extends GenericUDF { + private val returnOI: StringObjectInspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector + override def initialize(arguments: Array[ObjectInspector]): ObjectInspector = { + if (arguments.length != 0) { + throw new UDFArgumentLengthException("The function kyuubi_version() takes no arguments, got " + + arguments.length) + } + returnOI + } + + override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = KYUUBI_VERSION + + override def getDisplayString(children: Array[String]): String = "kyuubi_version()" +} + +class EngineNameFunction() extends GenericUDF { + private val returnOI: StringObjectInspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector + override def initialize(arguments: Array[ObjectInspector]): ObjectInspector = { + if (arguments.length != 0) { + throw new UDFArgumentLengthException("The function engine_name() takes no arguments, got " + + arguments.length) + } + returnOI + } + override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = + SessionState.get.getConf.get("hive.engine.name", "") + override def getDisplayString(children: Array[String]): String = "engine_name()" 
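The engine_name() function above reads back the hive.engine.name value that HiveSQLEngine now seeds with setIfUnset. A rough illustration of the default it produces, assuming a hypothetical user "alice" in place of UserGroupInformation.getCurrentUser.getShortUserName:

import java.time.Instant

// Mirrors the appName composed in HiveSQLEngine above; the user name is a placeholder.
val user = "alice"
val appName = s"kyuubi_${user}_hive_${Instant.now}"
// e.g. "kyuubi_alice_hive_2024-01-01T12:00:00.000Z" -- the value SELECT engine_name()
// returns when hive.engine.name has not been set explicitly.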
+} + +class EngineIdFunction() extends GenericUDF { + private val returnOI: StringObjectInspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector + override def initialize(arguments: Array[ObjectInspector]): ObjectInspector = { + if (arguments.length != 0) { + throw new UDFArgumentLengthException("The function engine_id() takes no arguments, got " + + arguments.length) + } + returnOI + } + + override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = + SessionState.get.getConf.get(KYUUBI_ENGINE_ID, "") + + override def getDisplayString(children: Array[String]): String = "engine_id()" +} + +class SystemUserFunction() extends GenericUDF { + private val returnOI: StringObjectInspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector + override def initialize(arguments: Array[ObjectInspector]): ObjectInspector = { + if (arguments.length != 0) { + throw new UDFArgumentLengthException("The function system_user() takes no arguments, got " + + arguments.length) + } + returnOI + } + + override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = Utils.currentUser + + override def getDisplayString(children: Array[String]): String = "system_user()" +} + +class SessionUserFunction() extends GenericUDF { + private val returnOI: StringObjectInspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector + override def initialize(arguments: Array[ObjectInspector]): ObjectInspector = { + if (arguments.length != 0) { + throw new UDFArgumentLengthException("The function session_user() takes no arguments, got " + + arguments.length) + } + returnOI + } + + override def evaluate(arguments: Array[GenericUDF.DeferredObject]): AnyRef = { + SessionState.get.getConf.get(KYUUBI_SESSION_USER_KEY, "") + } + + override def getDisplayString(children: Array[String]): String = "session_user()" +} diff --git a/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunction.scala b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunction.scala new file mode 100644 index 00000000000..ee91a804e1f --- /dev/null +++ b/externals/kyuubi-hive-sql-engine/src/main/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunction.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.hive.udf + +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF + +/** + * A wrapper for Hive's [[UserDefinedFunction]] + * + * @param name function name + * @param udf user-defined function + * @param description function description + */ +case class KyuubiDefinedFunction( + name: String, + udf: GenericUDF, + description: String, + returnType: String, + since: String) diff --git a/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationSuite.scala b/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationSuite.scala index f949ec37ab7..eb10e0b4144 100644 --- a/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationSuite.scala +++ b/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/operation/HiveOperationSuite.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.hive.operation import org.apache.commons.lang3.{JavaVersion, SystemUtils} -import org.apache.kyuubi.{HiveEngineTests, Utils} +import org.apache.kyuubi.{HiveEngineTests, KYUUBI_VERSION, Utils} import org.apache.kyuubi.engine.hive.HiveSQLEngine import org.apache.kyuubi.jdbc.hive.KyuubiStatement @@ -49,4 +49,20 @@ class HiveOperationSuite extends HiveEngineTests { assert(kyuubiStatement.getQueryId != null) } } + + test("kyuubi defined function - kyuubi_version") { + withJdbcStatement("hive_engine_test") { statement => + val rs = statement.executeQuery("SELECT kyuubi_version()") + assert(rs.next()) + assert(rs.getString(1) == KYUUBI_VERSION) + } + } + + test("kyuubi defined function - engine_name") { + withJdbcStatement("hive_engine_test") { statement => + val rs = statement.executeQuery("SELECT engine_name()") + assert(rs.next()) + assert(rs.getString(1).nonEmpty) + } + } } diff --git a/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunctionSuite.scala b/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunctionSuite.scala new file mode 100644 index 00000000000..08cb143e04a --- /dev/null +++ b/externals/kyuubi-hive-sql-engine/src/test/scala/org/apache/kyuubi/engine/hive/udf/KyuubiDefinedFunctionSuite.scala @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.hive.udf + +import java.nio.file.Paths + +import org.apache.kyuubi.{KyuubiFunSuite, MarkdownBuilder, Utils} +import org.apache.kyuubi.util.GoldenFileUtils._ + +/** + * End-to-end test cases for configuration doc file + * The golden result file is "docs/extensions/engines/hive/functions.md". 
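The HiveOperationSuite additions above only exercise kyuubi_version() and engine_name(). The other functions registered by KDFRegistry (engine_id, system_user, session_user) could be checked in the same style; the test below is a hypothetical extension following the suite's own pattern, not part of this change, and the assertions are deliberately loose because the values depend on the session.

  test("kyuubi defined function - engine_id / system_user / session_user") {
    withJdbcStatement("hive_engine_test") { statement =>
      val rs = statement.executeQuery("SELECT engine_id(), system_user(), session_user()")
      assert(rs.next())
      // engine_id() echoes the KYUUBI_ENGINE_ID entry of the Hive conf; may be empty locally
      assert(rs.getString(1) != null)
      // system_user() returns the user running the engine process
      assert(rs.getString(2).nonEmpty)
      // session_user() reads KYUUBI_SESSION_USER_KEY, which HiveOperation.beforeRun sets
      assert(rs.getString(3) != null)
    }
  }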
+ * + * To run the entire test suite: + * {{{ + * KYUUBI_UPDATE=0 dev/gen/gen_hive_kdf_docs.sh + * }}} + * + * To re-generate golden files for entire suite, run: + * {{{ + * dev/gen/gen_hive_kdf_docs.sh + * }}} + */ +class KyuubiDefinedFunctionSuite extends KyuubiFunSuite { + + private val kyuubiHome: String = Utils.getCodeSourceLocation(getClass) + .split("kyuubi-hive-sql-engine")(0) + private val markdown = + Paths.get(kyuubiHome, "..", "docs", "extensions", "engines", "hive", "functions.md") + .toAbsolutePath + + test("verify or update kyuubi hive sql functions") { + val builder = MarkdownBuilder(licenced = true, getClass.getName) + + builder += "# Auxiliary SQL Functions" += + """Kyuubi provides several auxiliary SQL functions as supplement to Hive's + | [Built-in Functions](https://cwiki.apache.org/confluence/display/hive/languagemanual+udf# + |LanguageManualUDF-Built-inFunctions)""" ++= + """ + | Name | Description | Return Type | Since + | --- | --- | --- | --- + |""" + KDFRegistry.registeredFunctions.foreach { func => + builder += s"${func.name} | ${func.description} | ${func.returnType} | ${func.since}" + } + + verifyOrRegenerateGoldenFile(markdown, builder.toMarkdown, "dev/gen/gen_hive_kdf_docs.sh") + } +} diff --git a/externals/kyuubi-jdbc-engine/pom.xml b/externals/kyuubi-jdbc-engine/pom.xml index 4bcc4fb601f..3c21fed570f 100644 --- a/externals/kyuubi-jdbc-engine/pom.xml +++ b/externals/kyuubi-jdbc-engine/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-jdbc-engine_2.12 + kyuubi-jdbc-engine_${scala.binary.version} jar Kyuubi Project Engine JDBC https://kyuubi.apache.org/ diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/JdbcSQLEngine.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/JdbcSQLEngine.scala index 618098f31b9..6e0647f6c7a 100644 --- a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/JdbcSQLEngine.scala +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/JdbcSQLEngine.scala @@ -19,7 +19,9 @@ package org.apache.kyuubi.engine.jdbc import org.apache.kyuubi.{Logging, Utils} import org.apache.kyuubi.Utils.{addShutdownHook, JDBC_ENGINE_SHUTDOWN_PRIORITY} import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.ENGINE_JDBC_INITIALIZE_SQL import org.apache.kyuubi.engine.jdbc.JdbcSQLEngine.currentEngine +import org.apache.kyuubi.engine.jdbc.util.KyuubiJdbcUtils import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ZK_CONN_RETRY_POLICY import org.apache.kyuubi.ha.client.RetryPolicies import org.apache.kyuubi.service.Serverable @@ -71,6 +73,8 @@ object JdbcSQLEngine extends Logging { kyuubiConf.setIfMissing(HA_ZK_CONN_RETRY_POLICY, RetryPolicies.N_TIME.toString) startEngine() + + KyuubiJdbcUtils.initializeJdbcSession(kyuubiConf, kyuubiConf.get(ENGINE_JDBC_INITIALIZE_SQL)) } catch { case t: Throwable if currentEngine.isDefined => currentEngine.foreach { engine => diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/connection/ConnectionProvider.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/connection/ConnectionProvider.scala index 798c92fbe41..cb6e4b6c551 100644 --- a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/connection/ConnectionProvider.scala +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/connection/ConnectionProvider.scala @@ 
-16,26 +16,25 @@ */ package org.apache.kyuubi.engine.jdbc.connection -import java.sql.{Connection, DriverManager} -import java.util.ServiceLoader - -import scala.collection.mutable.ArrayBuffer +import java.sql.{Connection, Driver, DriverManager} import org.apache.kyuubi.Logging import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.{ENGINE_JDBC_CONNECTION_PROVIDER, ENGINE_JDBC_CONNECTION_URL, ENGINE_JDBC_DRIVER_CLASS} +import org.apache.kyuubi.util.reflect.DynClasses +import org.apache.kyuubi.util.reflect.ReflectUtils._ abstract class AbstractConnectionProvider extends Logging { protected val providers = loadProviders() def getProviderClass(kyuubiConf: KyuubiConf): String = { - val specifiedDriverClass = kyuubiConf.get(ENGINE_JDBC_DRIVER_CLASS) - specifiedDriverClass.foreach(Class.forName) - - specifiedDriverClass.getOrElse { + val driverClass: Class[_ <: Driver] = Option( + DynClasses.builder().impl(kyuubiConf.get(ENGINE_JDBC_DRIVER_CLASS).get) + .orNull().build[Driver]()).getOrElse { val url = kyuubiConf.get(ENGINE_JDBC_CONNECTION_URL).get - DriverManager.getDriver(url).getClass.getCanonicalName + DriverManager.getDriver(url).getClass } + driverClass.getCanonicalName } def create(kyuubiConf: KyuubiConf): Connection = { @@ -69,27 +68,12 @@ abstract class AbstractConnectionProvider extends Logging { selectedProvider.getConnection(kyuubiConf) } - def loadProviders(): Seq[JdbcConnectionProvider] = { - val loader = ServiceLoader.load( - classOf[JdbcConnectionProvider], - Thread.currentThread().getContextClassLoader) - val providers = ArrayBuffer[JdbcConnectionProvider]() - - val iterator = loader.iterator() - while (iterator.hasNext) { - try { - val provider = iterator.next() + def loadProviders(): Seq[JdbcConnectionProvider] = + loadFromServiceLoader[JdbcConnectionProvider]() + .map { provider => info(s"Loaded provider: $provider") - providers += provider - } catch { - case t: Throwable => - warn(s"Loaded of the provider failed with the exception", t) - } - } - - // TODO support disable provider - providers - } + provider + }.toSeq } object ConnectionProvider extends AbstractConnectionProvider diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/dialect/JdbcDialect.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/dialect/JdbcDialect.scala index b7ac7f43b0f..e08b2275875 100644 --- a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/dialect/JdbcDialect.scala +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/dialect/JdbcDialect.scala @@ -18,9 +18,6 @@ package org.apache.kyuubi.engine.jdbc.dialect import java.sql.{Connection, Statement} import java.util -import java.util.ServiceLoader - -import scala.collection.JavaConverters._ import org.apache.kyuubi.{KyuubiException, Logging} import org.apache.kyuubi.config.KyuubiConf @@ -29,6 +26,7 @@ import org.apache.kyuubi.engine.jdbc.schema.{RowSetHelper, SchemaHelper} import org.apache.kyuubi.engine.jdbc.util.SupportServiceLoader import org.apache.kyuubi.operation.Operation import org.apache.kyuubi.session.Session +import org.apache.kyuubi.util.reflect.ReflectUtils._ abstract class JdbcDialect extends SupportServiceLoader with Logging { @@ -75,9 +73,8 @@ object JdbcDialects extends Logging { assert(url.length > 5 && url.substring(5).contains(":")) url.substring(5, url.indexOf(":", 5)) } - val serviceLoader = - ServiceLoader.load(classOf[JdbcDialect], Thread.currentThread().getContextClassLoader) - 
serviceLoader.asScala.filter(_.name().equalsIgnoreCase(shortName)).toList match { + loadFromServiceLoader[JdbcDialect]() + .filter(_.name().equalsIgnoreCase(shortName)).toList match { case Nil => throw new KyuubiException(s"Don't find jdbc dialect implement for jdbc engine: $shortName.") case head :: Nil => diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/operation/JdbcOperation.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/operation/JdbcOperation.scala index 6cac42f49ef..2ca17375717 100644 --- a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/operation/JdbcOperation.scala +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/operation/JdbcOperation.scala @@ -16,7 +16,7 @@ */ package org.apache.kyuubi.engine.jdbc.operation -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp, TRowSet} import org.apache.kyuubi.{KyuubiSQLException, Utils} import org.apache.kyuubi.config.KyuubiConf @@ -36,7 +36,9 @@ abstract class JdbcOperation(session: Session) extends AbstractOperation(session protected lazy val dialect: JdbcDialect = JdbcDialects.get(conf) - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) @@ -51,7 +53,10 @@ abstract class JdbcOperation(session: Session) extends AbstractOperation(session val taken = iter.take(rowSetSize) val resultRowSet = toTRowSet(taken) resultRowSet.setStartRowOffset(iter.getPosition) - resultRowSet + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(resultRowSet) + resp.setHasMoreRows(false) + resp } override def cancel(): Unit = { @@ -66,7 +71,7 @@ abstract class JdbcOperation(session: Session) extends AbstractOperation(session // We should use Throwable instead of Exception since `java.lang.NoClassDefFoundError` // could be thrown. 
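As a side note on the JdbcDialects.get change above: the dialect is still selected by the scheme segment of the JDBC URL, only the lookup now goes through loadFromServiceLoader. The extraction itself is plain substring work, illustrated here with a made-up URL:

// Same logic as JdbcDialects.get above: take what sits between "jdbc:" and the next ':'.
val url = "jdbc:mysql://localhost:3306/kyuubi" // example URL, not taken from the change
assert(url.length > 5 && url.substring(5).contains(":"))
val shortName = url.substring(5, url.indexOf(":", 5)) // yields "mysql"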
case e: Throwable => - state.synchronized { + withLockRequired { val errMsg = Utils.stringifyException(e) if (state == OperationState.TIMEOUT) { val ke = KyuubiSQLException(s"Timeout operating $opType: $errMsg") diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/session/JdbcSessionImpl.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/session/JdbcSessionImpl.scala index f8cd40412f0..8b36e5a56df 100644 --- a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/session/JdbcSessionImpl.scala +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/session/JdbcSessionImpl.scala @@ -23,8 +23,11 @@ import scala.util.{Failure, Success, Try} import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion} import org.apache.kyuubi.KyuubiSQLException +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY import org.apache.kyuubi.engine.jdbc.connection.ConnectionProvider +import org.apache.kyuubi.engine.jdbc.util.KyuubiJdbcUtils import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} class JdbcSessionImpl( @@ -43,7 +46,16 @@ class JdbcSessionImpl( private var databaseMetaData: DatabaseMetaData = _ - private val kyuubiConf = sessionManager.getConf + private val kyuubiConf: KyuubiConf = normalizeConf + + private def normalizeConf: KyuubiConf = { + val kyuubiConf = sessionManager.getConf.clone + if (kyuubiConf.get(ENGINE_JDBC_CONNECTION_PROPAGATECREDENTIAL)) { + kyuubiConf.set(ENGINE_JDBC_CONNECTION_USER, user) + kyuubiConf.set(ENGINE_JDBC_CONNECTION_PASSWORD, password) + } + kyuubiConf + } override def open(): Unit = { info(s"Starting to open jdbc session.") @@ -51,6 +63,10 @@ class JdbcSessionImpl( sessionConnection = ConnectionProvider.create(kyuubiConf) databaseMetaData = sessionConnection.getMetaData } + KyuubiJdbcUtils.initializeJdbcSession( + kyuubiConf, + sessionConnection, + kyuubiConf.get(ENGINE_JDBC_SESSION_INITIALIZE_SQL)) super.open() info(s"The jdbc session is started.") } diff --git a/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/util/KyuubiJdbcUtils.scala b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/util/KyuubiJdbcUtils.scala new file mode 100644 index 00000000000..7107045ff14 --- /dev/null +++ b/externals/kyuubi-jdbc-engine/src/main/scala/org/apache/kyuubi/engine/jdbc/util/KyuubiJdbcUtils.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
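The state.synchronized blocks replaced by withLockRequired above rely on a shared helper; judging from the inline versions removed from KyuubiSparkILoop and SessionPythonWorker later in this diff, Utils.withLockRequired is presumably equivalent to the following sketch:

import java.util.concurrent.locks.ReentrantLock

// Presumed shape of Utils.withLockRequired, mirroring the inline implementations
// deleted elsewhere in this change.
def withLockRequired[T](lock: ReentrantLock)(block: => T): T = {
  try {
    lock.lock()
    block
  } finally lock.unlock()
}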
+ */ + +package org.apache.kyuubi.engine.jdbc.util + +import java.sql.Connection + +import org.apache.kyuubi.{KyuubiSQLException, Logging} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.engine.jdbc.connection.ConnectionProvider +import org.apache.kyuubi.engine.jdbc.dialect.{JdbcDialect, JdbcDialects} +import org.apache.kyuubi.util.JdbcUtils + +object KyuubiJdbcUtils extends Logging { + + def initializeJdbcSession(kyuubiConf: KyuubiConf, initializationSQLs: Seq[String]): Unit = { + JdbcUtils.withCloseable(ConnectionProvider.create(kyuubiConf)) { connection => + initializeJdbcSession(kyuubiConf, connection, initializationSQLs) + } + } + + def initializeJdbcSession( + kyuubiConf: KyuubiConf, + connection: Connection, + initializationSQLs: Seq[String]): Unit = { + if (initializationSQLs == null || initializationSQLs.isEmpty) { + return + } + try { + val dialect: JdbcDialect = JdbcDialects.get(kyuubiConf) + JdbcUtils.withCloseable(dialect.createStatement(connection)) { statement => + initializationSQLs.foreach { sql => + debug(s"Execute initialization sql: $sql") + statement.execute(sql) + } + } + } catch { + case e: Exception => + error("Failed to execute initialization sql.", e) + throw KyuubiSQLException(e) + } + } +} diff --git a/externals/kyuubi-spark-sql-engine/pom.xml b/externals/kyuubi-spark-sql-engine/pom.xml index 5b227cb5e29..c453bd28382 100644 --- a/externals/kyuubi-spark-sql-engine/pom.xml +++ b/externals/kyuubi-spark-sql-engine/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-spark-sql-engine_2.12 + kyuubi-spark-sql-engine_${scala.binary.version} jar Kyuubi Project Engine Spark SQL https://kyuubi.apache.org/ @@ -65,6 +65,13 @@ provided + + org.apache.spark + spark-sql_${scala.binary.version} + test-jar + test + + org.apache.spark spark-repl_${scala.binary.version} @@ -140,69 +147,77 @@ - org.apache.parquet - parquet-avro - test - - - - org.apache.spark - spark-avro_${scala.binary.version} - test - - - - org.apache.hudi - hudi-common + io.delta + delta-core_${scala.binary.version} test - org.apache.hudi - hudi-spark-common_${scala.binary.version} + org.apache.kyuubi + kyuubi-zookeeper_${scala.binary.version} + ${project.version} test - org.apache.hudi - hudi-spark_${scala.binary.version} + com.dimafeng + testcontainers-scala-scalatest_${scala.binary.version} test - org.apache.hudi - hudi-spark3.1.x_${scala.binary.version} + io.etcd + jetcd-launcher test - io.delta - delta-core_${scala.binary.version} + com.vladsch.flexmark + flexmark-all test org.apache.kyuubi - kyuubi-zookeeper_${scala.binary.version} + kyuubi-spark-lineage_${scala.binary.version} ${project.version} test - - - io.etcd - jetcd-launcher - test - - - - com.vladsch.flexmark - flexmark-all - test - + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-scala-sources + + add-source + + generate-sources + + + src/main/scala-${scala.binary.version} + + + + + add-scala-test-sources + + add-test-source + + generate-test-sources + + + src/test/scala-${scala.binary.version} + + + + + org.apache.maven.plugins maven-shade-plugin @@ -223,15 +238,9 @@ io.perfmark:perfmark-api io.vertx:* net.jodah:failsafe - org.apache.curator:curator-client - org.apache.curator:curator-framework - org.apache.curator:curator-recipes org.apache.hive:hive-service-rpc - org.apache.kyuubi:kyuubi-common_${scala.binary.version} - org.apache.kyuubi:kyuubi-events_${scala.binary.version} - org.apache.kyuubi:kyuubi-ha_${scala.binary.version} + 
org.apache.kyuubi:* org.apache.thrift:* - org.apache.zookeeper:zookeeper org.checkerframework:checker-qual org.codehaus.mojo:animal-sniffer-annotations @@ -256,27 +265,6 @@ - - org.apache.curator - ${kyuubi.shade.packageName}.org.apache.curator - - org.apache.curator.** - - - - org.apache.zookeeper - ${kyuubi.shade.packageName}.org.apache.zookeeper - - org.apache.zookeeper.** - - - - org.apache.jute - ${kyuubi.shade.packageName}.org.apache.jute - - org.apache.jute.** - - org.apache.hive.service.rpc.thrift ${kyuubi.shade.packageName}.org.apache.hive.service.rpc.thrift diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala b/externals/kyuubi-spark-sql-engine/src/main/scala-2.12/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala similarity index 90% rename from externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala rename to externals/kyuubi-spark-sql-engine/src/main/scala-2.12/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala index 27090fae4af..fbbda89edbd 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala-2.12/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala @@ -17,22 +17,23 @@ package org.apache.kyuubi.engine.spark.repl -import java.io.{ByteArrayOutputStream, File} +import java.io.{ByteArrayOutputStream, File, PrintWriter} import java.util.concurrent.locks.ReentrantLock import scala.tools.nsc.Settings -import scala.tools.nsc.interpreter.IR -import scala.tools.nsc.interpreter.JPrintWriter +import scala.tools.nsc.interpreter.Results import org.apache.spark.SparkContext import org.apache.spark.repl.SparkILoop import org.apache.spark.sql.{DataFrame, SparkSession} import org.apache.spark.util.MutableURLClassLoader +import org.apache.kyuubi.Utils + private[spark] case class KyuubiSparkILoop private ( spark: SparkSession, output: ByteArrayOutputStream) - extends SparkILoop(None, new JPrintWriter(output)) { + extends SparkILoop(None, new PrintWriter(output)) { import KyuubiSparkILoop._ val result = new DataFrameHolder(spark) @@ -100,7 +101,7 @@ private[spark] case class KyuubiSparkILoop private ( def clearResult(statementId: String): Unit = result.unset(statementId) - def interpretWithRedirectOutError(statement: String): IR.Result = withLockRequired { + def interpretWithRedirectOutError(statement: String): Results.Result = withLockRequired { Console.withOut(output) { Console.withErr(output) { this.interpret(statement) @@ -124,10 +125,5 @@ private[spark] object KyuubiSparkILoop { } private val lock = new ReentrantLock() - private def withLockRequired[T](block: => T): T = { - try { - lock.lock() - block - } finally lock.unlock() - } + private def withLockRequired[T](block: => T): T = Utils.withLockRequired(lock)(block) } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala-2.13/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala b/externals/kyuubi-spark-sql-engine/src/main/scala-2.13/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala new file mode 100644 index 00000000000..a63d71a7885 --- /dev/null +++ b/externals/kyuubi-spark-sql-engine/src/main/scala-2.13/org/apache/kyuubi/engine/spark/repl/KyuubiSparkILoop.scala @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine.spark.repl + +import java.io.{ByteArrayOutputStream, File, PrintWriter} +import java.util.concurrent.locks.ReentrantLock + +import scala.tools.nsc.Settings +import scala.tools.nsc.interpreter.{IMain, Results} + +import org.apache.spark.SparkContext +import org.apache.spark.repl.SparkILoop +import org.apache.spark.sql.{DataFrame, SparkSession} +import org.apache.spark.util.MutableURLClassLoader + +import org.apache.kyuubi.Utils + +private[spark] case class KyuubiSparkILoop private ( + spark: SparkSession, + output: ByteArrayOutputStream) + extends SparkILoop(null, new PrintWriter(output)) { + import KyuubiSparkILoop._ + + val result = new DataFrameHolder(spark) + + private def initialize(): Unit = withLockRequired { + val settings = new Settings + val interpArguments = List( + "-Yrepl-class-based", + "-Yrepl-outdir", + s"${spark.sparkContext.getConf.get("spark.repl.class.outputDir")}") + settings.processArguments(interpArguments, processAll = true) + settings.usejavacp.value = true + val currentClassLoader = Thread.currentThread().getContextClassLoader + settings.embeddedDefaults(currentClassLoader) + this.createInterpreter(settings) + val iMain = this.intp.asInstanceOf[IMain] + iMain.initializeCompiler() + try { + this.compilerClasspath + iMain.ensureClassLoader() + var classLoader: ClassLoader = Thread.currentThread().getContextClassLoader + while (classLoader != null) { + classLoader match { + case loader: MutableURLClassLoader => + val allJars = loader.getURLs.filter { u => + val file = new File(u.getPath) + u.getProtocol == "file" && file.isFile && + file.getName.contains("scala-lang_scala-reflect") + } + this.addUrlsToClassPath(allJars: _*) + classLoader = null + case _ => + classLoader = classLoader.getParent + } + } + + this.addUrlsToClassPath( + classOf[DataFrameHolder].getProtectionDomain.getCodeSource.getLocation) + } finally { + Thread.currentThread().setContextClassLoader(currentClassLoader) + } + + this.beQuietDuring { + // SparkSession/SparkContext and their implicits + this.bind("spark", classOf[SparkSession].getCanonicalName, spark, List("""@transient""")) + this.bind( + "sc", + classOf[SparkContext].getCanonicalName, + spark.sparkContext, + List("""@transient""")) + + this.interpret("import org.apache.spark.SparkContext._") + this.interpret("import spark.implicits._") + this.interpret("import spark.sql") + this.interpret("import org.apache.spark.sql.functions._") + + // for feeding results to client, e.g. 
beeline + this.bind( + "result", + classOf[DataFrameHolder].getCanonicalName, + result) + } + } + + def getResult(statementId: String): DataFrame = result.get(statementId) + + def clearResult(statementId: String): Unit = result.unset(statementId) + + def interpretWithRedirectOutError(statement: String): Results.Result = withLockRequired { + Console.withOut(output) { + Console.withErr(output) { + this.interpret(statement) + } + } + } + + def getOutput: String = { + val res = output.toString.trim + output.reset() + res + } +} + +private[spark] object KyuubiSparkILoop { + def apply(spark: SparkSession): KyuubiSparkILoop = { + val os = new ByteArrayOutputStream() + val iLoop = new KyuubiSparkILoop(spark, os) + iLoop.initialize() + iLoop + } + + private val lock = new ReentrantLock() + private def withLockRequired[T](block: => T): T = Utils.withLockRequired(lock)(block) +} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/KyuubiSparkUtil.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/KyuubiSparkUtil.scala index 2c3e7195c43..b9fb9325999 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/KyuubiSparkUtil.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/KyuubiSparkUtil.scala @@ -21,12 +21,12 @@ import java.time.{Instant, LocalDateTime, ZoneId} import scala.annotation.meta.getter -import org.apache.spark.SparkContext +import org.apache.spark.{SPARK_VERSION, SparkContext} import org.apache.spark.sql.SparkSession import org.apache.spark.util.kvstore.KVIndex import org.apache.kyuubi.Logging -import org.apache.kyuubi.engine.SemanticVersion +import org.apache.kyuubi.util.SemanticVersion object KyuubiSparkUtil extends Logging { @@ -95,9 +95,7 @@ object KyuubiSparkUtil extends Logging { } } - lazy val sparkMajorMinorVersion: (Int, Int) = { - val runtimeSparkVer = org.apache.spark.SPARK_VERSION - val runtimeVersion = SemanticVersion(runtimeSparkVer) - (runtimeVersion.majorVersion, runtimeVersion.minorVersion) - } + // Given that we are on the Spark SQL engine side, the [[org.apache.spark.SPARK_VERSION]] can be + // represented as the runtime version of the Spark SQL engine. 
+ lazy val SPARK_ENGINE_RUNTIME_VERSION: SemanticVersion = SemanticVersion(SPARK_VERSION) } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala index 42e7c44a137..5f91bc73db5 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala @@ -17,7 +17,6 @@ package org.apache.kyuubi.engine.spark -import java.net.InetAddress import java.time.Instant import java.util.{Locale, UUID} import java.util.concurrent.{CountDownLatch, ScheduledExecutorService, ThreadPoolExecutor, TimeUnit} @@ -36,7 +35,8 @@ import org.apache.kyuubi.{KyuubiException, Logging, Utils} import org.apache.kyuubi.Utils._ import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys} import org.apache.kyuubi.config.KyuubiConf._ -import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_ENGINE_SUBMIT_TIME_KEY +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_SUBMIT_TIME_KEY, KYUUBI_ENGINE_URL} +import org.apache.kyuubi.engine.ShareLevel import org.apache.kyuubi.engine.spark.SparkSQLEngine.{countDownLatch, currentEngine} import org.apache.kyuubi.engine.spark.events.{EngineEvent, EngineEventsStore, SparkEventHandlerRegister} import org.apache.kyuubi.engine.spark.session.SparkSessionImpl @@ -80,6 +80,12 @@ case class SparkSQLEngine(spark: SparkSession) extends Serverable("SparkSQLEngin assert(currentEngine.isDefined) currentEngine.get.stop() }) + + val maxInitTimeout = conf.get(ENGINE_SPARK_MAX_INITIAL_WAIT) + if (conf.get(ENGINE_SHARE_LEVEL) == ShareLevel.CONNECTION.toString && + maxInitTimeout > 0) { + startFastFailChecker(maxInitTimeout) + } } override def stop(): Unit = if (shutdown.compareAndSet(false, true)) { @@ -114,6 +120,27 @@ case class SparkSQLEngine(spark: SparkSession) extends Serverable("SparkSQLEngin stopEngineExec.get.execute(stopTask) } + private[kyuubi] def startFastFailChecker(maxTimeout: Long): Unit = { + val startedTime = System.currentTimeMillis() + Utils.tryLogNonFatalError { + ThreadUtils.runInNewThread("spark-engine-failfast-checker") { + if (!shutdown.get) { + while (backendService.sessionManager.getOpenSessionCount <= 0 && + System.currentTimeMillis() - startedTime < maxTimeout) { + info(s"Waiting for the initial connection") + Thread.sleep(Duration(10, TimeUnit.SECONDS).toMillis) + } + if (backendService.sessionManager.getOpenSessionCount <= 0) { + error(s"Spark engine has been terminated because no incoming connection" + + s" for more than $maxTimeout ms, de-registering from engine discovery space.") + assert(currentEngine.isDefined) + currentEngine.get.stop() + } + } + } + } + } + override protected def stopServer(): Unit = { countDownLatch.countDown() } @@ -165,6 +192,10 @@ object SparkSQLEngine extends Logging { private val sparkSessionCreated = new AtomicBoolean(false) + // Kubernetes pod name max length - '-exec-' - Int.MAX_VALUE.length + // 253 - 10 - 6 + val EXECUTOR_POD_NAME_PREFIX_MAX_LENGTH = 237 + SignalRegister.registerLogger(logger) setupConf() @@ -189,7 +220,6 @@ object SparkSQLEngine extends Logging { _kyuubiConf = KyuubiConf() val rootDir = _sparkConf.getOption("spark.repl.classdir").getOrElse(getLocalDir(_sparkConf)) val outputDir = Utils.createTempDir(prefix = "repl", root = rootDir) - 
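Since SPARK_ENGINE_RUNTIME_VERSION replaces the (major, minor) tuple above, runtime gating now goes through SemanticVersion. A hedged sketch using only the fields the removed helper already relied on; the 3.3 threshold is purely illustrative:

import org.apache.spark.SPARK_VERSION
import org.apache.kyuubi.util.SemanticVersion

// SemanticVersion(SPARK_VERSION) is exactly what SPARK_ENGINE_RUNTIME_VERSION holds above.
val v = SemanticVersion(SPARK_VERSION)
val atLeastSpark33 = v.majorVersion > 3 || (v.majorVersion == 3 && v.minorVersion >= 3)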
_sparkConf.setIfMissing("spark.sql.execution.topKSortFallbackThreshold", "10000") _sparkConf.setIfMissing("spark.sql.legacy.castComplexTypesToString.enabled", "true") _sparkConf.setIfMissing("spark.master", "local") _sparkConf.set( @@ -223,7 +253,7 @@ object SparkSQLEngine extends Logging { if (!isOnK8sClusterMode) { // set driver host to ip instead of kyuubi pod name - _sparkConf.set("spark.driver.host", InetAddress.getLocalHost.getHostAddress) + _sparkConf.setIfMissing("spark.driver.host", Utils.findLocalInetAddress.getHostAddress) } } @@ -259,6 +289,7 @@ object SparkSQLEngine extends Logging { KyuubiSparkUtil.initializeSparkSession( session, kyuubiConf.get(ENGINE_INITIALIZE_SQL) ++ kyuubiConf.get(ENGINE_SESSION_INITIALIZE_SQL)) + session.sparkContext.setLocalProperty(KYUUBI_ENGINE_URL, KyuubiSparkUtil.engineUrl) session } @@ -359,7 +390,7 @@ object SparkSQLEngine extends Logging { private def startInitTimeoutChecker(startTime: Long, timeout: Long): Unit = { val mainThread = Thread.currentThread() - new Thread( + val checker = new Thread( () => { while (System.currentTimeMillis() - startTime < timeout && !sparkSessionCreated.get()) { Thread.sleep(500) @@ -368,7 +399,9 @@ object SparkSQLEngine extends Logging { mainThread.interrupt() } }, - "CreateSparkTimeoutChecker").start() + "CreateSparkTimeoutChecker") + checker.setDaemon(true) + checker.start() } private def isOnK8sClusterMode: Boolean = { @@ -390,8 +423,4 @@ object SparkSQLEngine extends Logging { s"kyuubi-${UUID.randomUUID()}" } } - - // Kubernetes pod name max length - '-exec-' - Int.MAX_VALUE.length - // 253 - 10 - 6 - val EXECUTOR_POD_NAME_PREFIX_MAX_LENGTH = 237 } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendService.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendService.scala index d4eaf3454a4..c2563b32bce 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendService.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendService.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.engine.spark import scala.collection.JavaConverters._ +import org.apache.hadoop.conf.Configuration import org.apache.hadoop.io.Text import org.apache.hadoop.security.{Credentials, UserGroupInformation} import org.apache.hadoop.security.token.{Token, TokenIdentifier} @@ -27,11 +28,13 @@ import org.apache.spark.SparkContext import org.apache.spark.kyuubi.SparkContextHelper import org.apache.kyuubi.{KyuubiSQLException, Logging} +import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiReservedKeys._ import org.apache.kyuubi.ha.client.{EngineServiceDiscovery, ServiceDiscovery} import org.apache.kyuubi.service.{Serverable, Service, TBinaryFrontendService} import org.apache.kyuubi.service.TFrontendService._ import org.apache.kyuubi.util.KyuubiHadoopUtils +import org.apache.kyuubi.util.reflect.DynConstructors class SparkTBinaryFrontendService( override val serverable: Serverable) @@ -94,13 +97,23 @@ class SparkTBinaryFrontendService( } override def attributes: Map[String, String] = { - Map(KYUUBI_ENGINE_ID -> KyuubiSparkUtil.engineId) + val extraAttributes = conf.get(KyuubiConf.ENGINE_SPARK_REGISTER_ATTRIBUTES).map { attr => + attr -> KyuubiSparkUtil.globalSparkContext.getConf.get(attr, "") + }.toMap + val attributes = extraAttributes ++ Map(KYUUBI_ENGINE_ID -> KyuubiSparkUtil.engineId) + // TODO 
Support Spark Web UI Enabled SSL + sc.uiWebUrl match { + case Some(url) => attributes ++ Map(KYUUBI_ENGINE_URL -> url.split("//").last) + case None => attributes + } } } object SparkTBinaryFrontendService extends Logging { val HIVE_DELEGATION_TOKEN = new Text("HIVE_DELEGATION_TOKEN") + val HIVE_CONF_CLASSNAME = "org.apache.hadoop.hive.conf.HiveConf" + @volatile private var _hiveConf: Configuration = _ private[spark] def renewDelegationToken(sc: SparkContext, delegationToken: String): Unit = { val newCreds = KyuubiHadoopUtils.decodeCredentials(delegationToken) @@ -124,7 +137,7 @@ object SparkTBinaryFrontendService extends Logging { newTokens: Map[Text, Token[_ <: TokenIdentifier]], oldCreds: Credentials, updateCreds: Credentials): Unit = { - val metastoreUris = sc.hadoopConfiguration.getTrimmed("hive.metastore.uris", "") + val metastoreUris = hiveConf(sc.hadoopConfiguration).getTrimmed("hive.metastore.uris", "") // `HiveMetaStoreClient` selects the first token whose service is "" and kind is // "HIVE_DELEGATION_TOKEN" to authenticate. @@ -195,4 +208,25 @@ object SparkTBinaryFrontendService extends Logging { 1 } } + + private[kyuubi] def hiveConf(hadoopConf: Configuration): Configuration = { + if (_hiveConf == null) { + synchronized { + if (_hiveConf == null) { + _hiveConf = + try { + DynConstructors.builder() + .impl(HIVE_CONF_CLASSNAME, classOf[Configuration], classOf[Class[_]]) + .build[Configuration]() + .newInstance(hadoopConf, Class.forName(HIVE_CONF_CLASSNAME)) + } catch { + case e: Throwable => + warn("Fail to create Hive Configuration", e) + hadoopConf + } + } + } + } + _hiveConf + } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecutePython.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecutePython.scala index d2627fd99fd..badd835301a 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecutePython.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecutePython.scala @@ -40,7 +40,7 @@ import org.apache.kyuubi.{KyuubiSQLException, Logging, Utils} import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SPARK_PYTHON_ENV_ARCHIVE, ENGINE_SPARK_PYTHON_ENV_ARCHIVE_EXEC_PATH, ENGINE_SPARK_PYTHON_HOME_ARCHIVE} import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_SESSION_USER_KEY, KYUUBI_STATEMENT_ID_KEY} import org.apache.kyuubi.engine.spark.KyuubiSparkUtil._ -import org.apache.kyuubi.operation.{ArrayFetchIterator, OperationState} +import org.apache.kyuubi.operation.{ArrayFetchIterator, OperationHandle, OperationState} import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session @@ -49,7 +49,8 @@ class ExecutePython( override val statement: String, override val shouldRunAsync: Boolean, queryTimeout: Long, - worker: SessionPythonWorker) extends SparkOperation(session) { + worker: SessionPythonWorker, + override protected val handle: OperationHandle) extends SparkOperation(session) { private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) override def getOperationLog: Option[OperationLog] = Option(operationLog) @@ -77,30 +78,31 @@ class ExecutePython( OperationLog.removeCurrentOperationLog() } - private def executePython(): Unit = withLocalProperties { + private def executePython(): Unit = try { - setState(OperationState.RUNNING) - info(diagnostics) - addOperationListener() - val response = 
worker.runCode(statement) - val status = response.map(_.content.status).getOrElse("UNKNOWN_STATUS") - if (PythonResponse.OK_STATUS.equalsIgnoreCase(status)) { - val output = response.map(_.content.getOutput()).getOrElse("") - val ename = response.map(_.content.getEname()).getOrElse("") - val evalue = response.map(_.content.getEvalue()).getOrElse("") - val traceback = response.map(_.content.getTraceback()).getOrElse(Seq.empty) - iter = - new ArrayFetchIterator[Row](Array(Row(output, status, ename, evalue, traceback))) - setState(OperationState.FINISHED) - } else { - throw KyuubiSQLException(s"Interpret error:\n$statement\n $response") + withLocalProperties { + setState(OperationState.RUNNING) + info(diagnostics) + addOperationListener() + val response = worker.runCode(statement) + val status = response.map(_.content.status).getOrElse("UNKNOWN_STATUS") + if (PythonResponse.OK_STATUS.equalsIgnoreCase(status)) { + val output = response.map(_.content.getOutput()).getOrElse("") + val ename = response.map(_.content.getEname()).getOrElse("") + val evalue = response.map(_.content.getEvalue()).getOrElse("") + val traceback = response.map(_.content.getTraceback()).getOrElse(Seq.empty) + iter = + new ArrayFetchIterator[Row](Array(Row(output, status, ename, evalue, traceback))) + setState(OperationState.FINISHED) + } else { + throw KyuubiSQLException(s"Interpret error:\n$statement\n $response") + } } } catch { onError(cancel = true) } finally { shutdownTimeoutMonitor() } - } override protected def runInternal(): Unit = { addTimeoutMonitor(queryTimeout) @@ -180,12 +182,7 @@ case class SessionPythonWorker( new BufferedReader(new InputStreamReader(workerProcess.getInputStream), 1) private val lock = new ReentrantLock() - private def withLockRequired[T](block: => T): T = { - try { - lock.lock() - block - } finally lock.unlock() - } + private def withLockRequired[T](block: => T): T = Utils.withLockRequired(lock)(block) /** * Run the python code and return the response. 
This method maybe invoked internally, diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteScala.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteScala.scala index ff686cca0d0..691c4fb32d3 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteScala.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteScala.scala @@ -31,7 +31,7 @@ import org.apache.spark.sql.types.StructType import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.engine.spark.KyuubiSparkUtil._ import org.apache.kyuubi.engine.spark.repl.KyuubiSparkILoop -import org.apache.kyuubi.operation.{ArrayFetchIterator, OperationState} +import org.apache.kyuubi.operation.{ArrayFetchIterator, OperationHandle, OperationState} import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session @@ -51,7 +51,8 @@ class ExecuteScala( repl: KyuubiSparkILoop, override val statement: String, override val shouldRunAsync: Boolean, - queryTimeout: Long) + queryTimeout: Long, + override protected val handle: OperationHandle) extends SparkOperation(session) { private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) @@ -76,59 +77,60 @@ class ExecuteScala( OperationLog.removeCurrentOperationLog() } - private def executeScala(): Unit = withLocalProperties { + private def executeScala(): Unit = try { - setState(OperationState.RUNNING) - info(diagnostics) - Thread.currentThread().setContextClassLoader(spark.sharedState.jarClassLoader) - addOperationListener() - val legacyOutput = repl.getOutput - if (legacyOutput.nonEmpty) { - warn(s"Clearing legacy output from last interpreting:\n $legacyOutput") - } - val replUrls = repl.classLoader.getParent.asInstanceOf[URLClassLoader].getURLs - spark.sharedState.jarClassLoader.getURLs.filterNot(replUrls.contains).foreach { jar => - try { - if ("file".equals(jar.toURI.getScheme)) { - repl.addUrlsToClassPath(jar) - } else { - spark.sparkContext.addFile(jar.toString) - val localJarFile = new File(SparkFiles.get(new Path(jar.toURI.getPath).getName)) - val localJarUrl = localJarFile.toURI.toURL - if (!replUrls.contains(localJarUrl)) { - repl.addUrlsToClassPath(localJarUrl) + withLocalProperties { + setState(OperationState.RUNNING) + info(diagnostics) + Thread.currentThread().setContextClassLoader(spark.sharedState.jarClassLoader) + addOperationListener() + val legacyOutput = repl.getOutput + if (legacyOutput.nonEmpty) { + warn(s"Clearing legacy output from last interpreting:\n $legacyOutput") + } + val replUrls = repl.classLoader.getParent.asInstanceOf[URLClassLoader].getURLs + spark.sharedState.jarClassLoader.getURLs.filterNot(replUrls.contains).foreach { jar => + try { + if ("file".equals(jar.toURI.getScheme)) { + repl.addUrlsToClassPath(jar) + } else { + spark.sparkContext.addFile(jar.toString) + val localJarFile = new File(SparkFiles.get(new Path(jar.toURI.getPath).getName)) + val localJarUrl = localJarFile.toURI.toURL + if (!replUrls.contains(localJarUrl)) { + repl.addUrlsToClassPath(localJarUrl) + } } + } catch { + case e: Throwable => error(s"Error adding $jar to repl class path", e) } - } catch { - case e: Throwable => error(s"Error adding $jar to repl class path", e) } - } - repl.interpretWithRedirectOutError(statement) match { - case Success => - iter = { - result = repl.getResult(statementId) - if (result != null) { 
- new ArrayFetchIterator[Row](result.collect()) - } else { - val output = repl.getOutput - debug("scala repl output:\n" + output) - new ArrayFetchIterator[Row](Array(Row(output))) + repl.interpretWithRedirectOutError(statement) match { + case Success => + iter = { + result = repl.getResult(statementId) + if (result != null) { + new ArrayFetchIterator[Row](result.collect()) + } else { + val output = repl.getOutput + debug("scala repl output:\n" + output) + new ArrayFetchIterator[Row](Array(Row(output))) + } } - } - case Error => - throw KyuubiSQLException(s"Interpret error:\n$statement\n ${repl.getOutput}") - case Incomplete => - throw KyuubiSQLException(s"Incomplete code:\n$statement") + case Error => + throw KyuubiSQLException(s"Interpret error:\n$statement\n ${repl.getOutput}") + case Incomplete => + throw KyuubiSQLException(s"Incomplete code:\n$statement") + } + setState(OperationState.FINISHED) } - setState(OperationState.FINISHED) } catch { onError(cancel = true) } finally { repl.clearResult(statementId) shutdownTimeoutMonitor() } - } override protected def runInternal(): Unit = { addTimeoutMonitor(queryTimeout) diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala index b29d2ca9a7e..17d8a741269 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala @@ -21,15 +21,14 @@ import java.util.concurrent.RejectedExecutionException import scala.collection.JavaConverters._ -import org.apache.spark.rdd.RDD import org.apache.spark.sql.DataFrame -import org.apache.spark.sql.execution.SQLExecution -import org.apache.spark.sql.kyuubi.SparkDatasetHelper +import org.apache.spark.sql.kyuubi.SparkDatasetHelper._ import org.apache.spark.sql.types._ import org.apache.kyuubi.{KyuubiSQLException, Logging} import org.apache.kyuubi.config.KyuubiConf.OPERATION_RESULT_MAX_ROWS import org.apache.kyuubi.engine.spark.KyuubiSparkUtil._ +import org.apache.kyuubi.engine.spark.session.SparkSessionImpl import org.apache.kyuubi.operation.{ArrayFetchIterator, FetchIterator, IterableFetchIterator, OperationHandle, OperationState} import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session @@ -77,22 +76,23 @@ class ExecuteStatement( resultDF.take(maxRows) } - protected def executeStatement(): Unit = withLocalProperties { + protected def executeStatement(): Unit = try { - setState(OperationState.RUNNING) - info(diagnostics) - Thread.currentThread().setContextClassLoader(spark.sharedState.jarClassLoader) - addOperationListener() - result = spark.sql(statement) - iter = collectAsIterator(result) - setCompiledStateIfNeeded() - setState(OperationState.FINISHED) + withLocalProperties { + setState(OperationState.RUNNING) + info(diagnostics) + Thread.currentThread().setContextClassLoader(spark.sharedState.jarClassLoader) + addOperationListener() + result = spark.sql(statement) + iter = collectAsIterator(result) + setCompiledStateIfNeeded() + setState(OperationState.FINISHED) + } } catch { onError(cancel = true) } finally { shutdownTimeoutMonitor() } - } override protected def runInternal(): Unit = { addTimeoutMonitor(queryTimeout) @@ -186,35 +186,18 @@ class ArrowBasedExecuteStatement( incrementalCollect, handle) { + 
checkUseLargeVarType() + override protected def incrementalCollectResult(resultDF: DataFrame): Iterator[Any] = { - collectAsArrow(convertComplexType(resultDF)) { rdd => - rdd.toLocalIterator - } + toArrowBatchLocalIterator(convertComplexType(resultDF)) } override protected def fullCollectResult(resultDF: DataFrame): Array[_] = { - collectAsArrow(convertComplexType(resultDF)) { rdd => - rdd.collect() - } + executeCollect(convertComplexType(resultDF)) } override protected def takeResult(resultDF: DataFrame, maxRows: Int): Array[_] = { - // this will introduce shuffle and hurt performance - val limitedResult = resultDF.limit(maxRows) - collectAsArrow(convertComplexType(limitedResult)) { rdd => - rdd.collect() - } - } - - /** - * refer to org.apache.spark.sql.Dataset#withAction(), assign a new execution id for arrow-based - * operation, so that we can track the arrow-based queries on the UI tab. - */ - private def collectAsArrow[T](df: DataFrame)(action: RDD[Array[Byte]] => T): T = { - SQLExecution.withNewExecutionId(df.queryExecution, Some("collectAsArrow")) { - df.queryExecution.executedPlan.resetMetrics() - action(SparkDatasetHelper.toArrowBatchRdd(df)) - } + executeCollect(convertComplexType(resultDF.limit(maxRows))) } override protected def isArrowBasedOperation: Boolean = true @@ -222,7 +205,19 @@ class ArrowBasedExecuteStatement( override val resultFormat = "arrow" private def convertComplexType(df: DataFrame): DataFrame = { - SparkDatasetHelper.convertTopLevelComplexTypeToHiveString(df, timestampAsString) + convertTopLevelComplexTypeToHiveString(df, timestampAsString) } + def checkUseLargeVarType(): Unit = { + // TODO: largeVarType support, see SPARK-39979. + val useLargeVarType = session.asInstanceOf[SparkSessionImpl].spark + .conf + .get("spark.sql.execution.arrow.useLargeVarType", "false") + .toBoolean + if (useLargeVarType) { + throw new KyuubiSQLException( + "`spark.sql.execution.arrow.useLargeVarType = true` is not supported now.", + null) + } + } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCatalogs.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCatalogs.scala index 6d818e53ed7..c8e58730096 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCatalogs.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCatalogs.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.IterableFetchIterator import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT import org.apache.kyuubi.session.Session @@ -33,7 +33,7 @@ class GetCatalogs(session: Session) extends SparkOperation(session) { override protected def runInternal(): Unit = { try { - iter = new IterableFetchIterator(SparkCatalogShim().getCatalogs(spark).toList) + iter = new IterableFetchIterator(SparkCatalogUtils.getCatalogs(spark)) } catch onError() } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetColumns.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetColumns.scala index e785169812f..3a0ab7d5ba6 100644 ---
a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetColumns.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetColumns.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types._ -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.IterableFetchIterator import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ import org.apache.kyuubi.session.Session @@ -115,8 +115,8 @@ class GetColumns( val schemaPattern = toJavaRegex(schemaName) val tablePattern = toJavaRegex(tableName) val columnPattern = toJavaRegex(columnName) - iter = new IterableFetchIterator(SparkCatalogShim() - .getColumns(spark, catalogName, schemaPattern, tablePattern, columnPattern).toList) + iter = new IterableFetchIterator(SparkCatalogUtils + .getColumns(spark, catalogName, schemaPattern, tablePattern, columnPattern)) } catch { onError() } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala index 66d707ec033..1d85d3d5adc 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala @@ -17,15 +17,20 @@ package org.apache.kyuubi.engine.spark.operation +import org.apache.spark.sql.Row import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim import org.apache.kyuubi.operation.IterableFetchIterator +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT import org.apache.kyuubi.session.Session class GetCurrentCatalog(session: Session) extends SparkOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def resultSchema: StructType = { new StructType() .add(TABLE_CAT, "string", nullable = true, "Catalog name.") @@ -33,7 +38,8 @@ class GetCurrentCatalog(session: Session) extends SparkOperation(session) { override protected def runInternal(): Unit = { try { - iter = new IterableFetchIterator(Seq(SparkCatalogShim().getCurrentCatalog(spark))) + val currentCatalogName = spark.sessionState.catalogManager.currentCatalog.name() + iter = new IterableFetchIterator(Seq(Row(currentCatalogName))) } catch onError() } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala index bcf3ad2a5f0..2478fb6a49a 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala @@ -17,15 +17,21 @@ package org.apache.kyuubi.engine.spark.operation +import org.apache.spark.sql.Row import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import 
org.apache.kyuubi.engine.spark.util.SparkCatalogUtils.quoteIfNeeded import org.apache.kyuubi.operation.IterableFetchIterator +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_SCHEM import org.apache.kyuubi.session.Session class GetCurrentDatabase(session: Session) extends SparkOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def resultSchema: StructType = { new StructType() .add(TABLE_SCHEM, "string", nullable = true, "Schema name.") @@ -33,7 +39,9 @@ class GetCurrentDatabase(session: Session) extends SparkOperation(session) { override protected def runInternal(): Unit = { try { - iter = new IterableFetchIterator(Seq(SparkCatalogShim().getCurrentDatabase(spark))) + val currentDatabaseName = + spark.sessionState.catalogManager.currentNamespace.map(quoteIfNeeded).mkString(".") + iter = new IterableFetchIterator(Seq(Row(currentDatabaseName))) } catch onError() } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetSchemas.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetSchemas.scala index 3937f528d63..46dc7634acf 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetSchemas.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetSchemas.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.IterableFetchIterator import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ import org.apache.kyuubi.session.Session @@ -40,7 +40,7 @@ class GetSchemas(session: Session, catalogName: String, schema: String) override protected def runInternal(): Unit = { try { val schemaPattern = toJavaRegex(schema) - val rows = SparkCatalogShim().getSchemas(spark, catalogName, schemaPattern) + val rows = SparkCatalogUtils.getSchemas(spark, catalogName, schemaPattern) iter = new IterableFetchIterator(rows) } catch onError() } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTableTypes.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTableTypes.scala index 1d2cae3815f..1029175b21f 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTableTypes.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTableTypes.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.Row import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.IterableFetchIterator import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ import org.apache.kyuubi.session.Session @@ -33,6 +33,6 @@ class GetTableTypes(session: Session) } override protected def runInternal(): Unit = { - iter = new IterableFetchIterator(SparkCatalogShim.sparkTableTypes.map(Row(_)).toList) + iter = new 
IterableFetchIterator(SparkCatalogUtils.sparkTableTypes.map(Row(_)).toList) } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTables.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTables.scala index 4093c61c100..980e4fdb173 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTables.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetTables.scala @@ -19,7 +19,8 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.config.KyuubiConf.OPERATION_GET_TABLES_IGNORE_TABLE_PROPERTIES +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.IterableFetchIterator import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ import org.apache.kyuubi.session.Session @@ -32,6 +33,12 @@ class GetTables( tableTypes: Set[String]) extends SparkOperation(session) { + protected val ignoreTableProperties = + spark.conf.getOption(OPERATION_GET_TABLES_IGNORE_TABLE_PROPERTIES.key) match { + case Some(s) => s.toBoolean + case _ => session.sessionManager.getConf.get(OPERATION_GET_TABLES_IGNORE_TABLE_PROPERTIES) + } + override def statement: String = { super.statement + s" [catalog: $catalog," + @@ -66,14 +73,19 @@ class GetTables( try { val schemaPattern = toJavaRegex(schema) val tablePattern = toJavaRegex(tableName) - val sparkShim = SparkCatalogShim() val catalogTablesAndViews = - sparkShim.getCatalogTablesOrViews(spark, catalog, schemaPattern, tablePattern, tableTypes) + SparkCatalogUtils.getCatalogTablesOrViews( + spark, + catalog, + schemaPattern, + tablePattern, + tableTypes, + ignoreTableProperties) val allTableAndViews = if (tableTypes.exists("VIEW".equalsIgnoreCase)) { catalogTablesAndViews ++ - sparkShim.getTempViews(spark, catalog, schemaPattern, tablePattern) + SparkCatalogUtils.getTempViews(spark, catalog, schemaPattern, tablePattern) } else { catalogTablesAndViews } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/PlanOnlyStatement.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/PlanOnlyStatement.scala index b7e5451ece2..4f88083130a 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/PlanOnlyStatement.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/PlanOnlyStatement.scala @@ -17,14 +17,17 @@ package org.apache.kyuubi.engine.spark.operation -import org.apache.spark.sql.Row +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.module.scala.DefaultScalaModule +import org.apache.spark.kyuubi.SparkUtilsHelper +import org.apache.spark.sql.{Row, SparkSession} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.StructType import org.apache.kyuubi.KyuubiSQLException -import org.apache.kyuubi.config.KyuubiConf.{OPERATION_PLAN_ONLY_EXCLUDES, OPERATION_PLAN_ONLY_OUT_STYLE} -import org.apache.kyuubi.operation.{AnalyzeMode, ArrayFetchIterator, ExecutionMode, IterableFetchIterator, JsonStyle, OptimizeMode, OptimizeWithStatsMode, ParseMode, PhysicalMode, PlainStyle, PlanOnlyMode, PlanOnlyStyle, UnknownMode, 
UnknownStyle} +import org.apache.kyuubi.config.KyuubiConf.{LINEAGE_PARSER_PLUGIN_PROVIDER, OPERATION_PLAN_ONLY_EXCLUDES, OPERATION_PLAN_ONLY_OUT_STYLE} +import org.apache.kyuubi.operation.{AnalyzeMode, ArrayFetchIterator, ExecutionMode, IterableFetchIterator, JsonStyle, LineageMode, OperationHandle, OptimizeMode, OptimizeWithStatsMode, ParseMode, PhysicalMode, PlainStyle, PlanOnlyMode, PlanOnlyStyle, UnknownMode, UnknownStyle} import org.apache.kyuubi.operation.PlanOnlyMode.{notSupportedModeError, unknownModeError} import org.apache.kyuubi.operation.PlanOnlyStyle.{notSupportedStyleError, unknownStyleError} import org.apache.kyuubi.operation.log.OperationLog @@ -36,12 +39,13 @@ import org.apache.kyuubi.session.Session class PlanOnlyStatement( session: Session, override val statement: String, - mode: PlanOnlyMode) + mode: PlanOnlyMode, + override protected val handle: OperationHandle) extends SparkOperation(session) { private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) - private val planExcludes: Seq[String] = { - spark.conf.getOption(OPERATION_PLAN_ONLY_EXCLUDES.key).map(_.split(",").map(_.trim).toSeq) + private val planExcludes: Set[String] = { + spark.conf.getOption(OPERATION_PLAN_ONLY_EXCLUDES.key).map(_.split(",").map(_.trim).toSet) .getOrElse(session.sessionManager.getConf.get(OPERATION_PLAN_ONLY_EXCLUDES)) } @@ -65,28 +69,29 @@ class PlanOnlyStatement( super.beforeRun() } - override protected def runInternal(): Unit = withLocalProperties { + override protected def runInternal(): Unit = try { - SQLConf.withExistingConf(spark.sessionState.conf) { - val parsed = spark.sessionState.sqlParser.parsePlan(statement) - - parsed match { - case cmd if planExcludes.contains(cmd.getClass.getSimpleName) => - result = spark.sql(statement) - iter = new ArrayFetchIterator(result.collect()) - - case plan => style match { - case PlainStyle => explainWithPlainStyle(plan) - case JsonStyle => explainWithJsonStyle(plan) - case UnknownStyle => unknownStyleError(style) - case other => throw notSupportedStyleError(other, "Spark SQL") - } + withLocalProperties { + SQLConf.withExistingConf(spark.sessionState.conf) { + val parsed = spark.sessionState.sqlParser.parsePlan(statement) + + parsed match { + case cmd if planExcludes.contains(cmd.getClass.getSimpleName) => + result = spark.sql(statement) + iter = new ArrayFetchIterator(result.collect()) + + case plan => style match { + case PlainStyle => explainWithPlainStyle(plan) + case JsonStyle => explainWithJsonStyle(plan) + case UnknownStyle => unknownStyleError(style) + case other => throw notSupportedStyleError(other, "Spark SQL") + } + } } } } catch { onError() } - } private def explainWithPlainStyle(plan: LogicalPlan): Unit = { mode match { @@ -117,6 +122,9 @@ class PlanOnlyStatement( case ExecutionMode => val executed = spark.sql(statement).queryExecution.executedPlan iter = new IterableFetchIterator(Seq(Row(executed.toString()))) + case LineageMode => + val result = parseLineage(spark, plan) + iter = new IterableFetchIterator(Seq(Row(result))) case UnknownMode => throw unknownModeError(mode) case _ => throw notSupportedModeError(mode, "Spark SQL") } @@ -141,10 +149,39 @@ class PlanOnlyStatement( case ExecutionMode => val executed = spark.sql(statement).queryExecution.executedPlan iter = new IterableFetchIterator(Seq(Row(executed.toJSON))) + case LineageMode => + val result = parseLineage(spark, plan) + iter = new IterableFetchIterator(Seq(Row(result))) case UnknownMode => throw unknownModeError(mode) case _ => throw 
KyuubiSQLException(s"The operation mode $mode" + " doesn't support in Spark SQL engine.") } } + + private def parseLineage(spark: SparkSession, plan: LogicalPlan): String = { + val analyzed = spark.sessionState.analyzer.execute(plan) + spark.sessionState.analyzer.checkAnalysis(analyzed) + val optimized = spark.sessionState.optimizer.execute(analyzed) + val parserProviderClass = session.sessionManager.getConf.get(LINEAGE_PARSER_PLUGIN_PROVIDER) + + try { + if (!SparkUtilsHelper.classesArePresent( + parserProviderClass)) { + throw new Exception(s"'$parserProviderClass' not found," + + " please install the kyuubi-spark-lineage plugin before using the 'lineage' mode") + } + + val lineage = Class.forName(parserProviderClass) + .getMethod("parse", classOf[SparkSession], classOf[LogicalPlan]) + .invoke(null, spark, optimized) + + val mapper = new ObjectMapper().registerModule(DefaultScalaModule) + mapper.writeValueAsString(lineage) + } catch { + case e: Throwable => + throw KyuubiSQLException(s"Failed to extract column lineage: ${e.getMessage}", e) + } + } + } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala index 4e8c0aa69a4..88105b086a9 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala @@ -19,18 +19,23 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentCatalog(session: Session, catalog: String) extends SparkOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def resultSchema: StructType = { new StructType() } override protected def runInternal(): Unit = { try { - SparkCatalogShim().setCurrentCatalog(spark, catalog) + SparkCatalogUtils.setCurrentCatalog(spark, catalog) setHasResultSet(false) } catch onError() } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala index 0a21bc83965..d227f5fd2ad 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala @@ -19,19 +19,23 @@ package org.apache.kyuubi.engine.spark.operation import org.apache.spark.sql.types.StructType -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentDatabase(session: Session, database: String) extends SparkOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def
resultSchema: StructType = { new StructType() } override protected def runInternal(): Unit = { try { - SparkCatalogShim().setCurrentDatabase(spark, database) + spark.sessionState.catalogManager.setCurrentNamespace(Array(database)) setHasResultSet(false) } catch onError() } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala index eb58407d47c..1de360f0715 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.engine.spark.operation import java.io.IOException import java.time.ZoneId -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TProgressUpdateResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp, TProgressUpdateResp, TRowSet} import org.apache.spark.kyuubi.{SparkProgressMonitor, SQLOperationListener} import org.apache.spark.kyuubi.SparkUtilsHelper.redact import org.apache.spark.sql.{DataFrame, Row, SparkSession} @@ -101,13 +101,13 @@ abstract class SparkOperation(session: Session) super.getStatus } - override def cleanup(targetState: OperationState): Unit = state.synchronized { + override def cleanup(targetState: OperationState): Unit = withLockRequired { operationListener.foreach(_.cleanup()) if (!isTerminalState(state)) { setState(targetState) Option(getBackgroundHandle).foreach(_.cancel(true)) - if (!spark.sparkContext.isStopped) spark.sparkContext.cancelJobGroup(statementId) } + if (!spark.sparkContext.isStopped) spark.sparkContext.cancelJobGroup(statementId) } protected val forceCancel = @@ -174,15 +174,16 @@ abstract class SparkOperation(session: Session) // could be thrown. 
case e: Throwable => if (cancel && !spark.sparkContext.isStopped) spark.sparkContext.cancelJobGroup(statementId) - state.synchronized { + withLockRequired { val errMsg = Utils.stringifyException(e) if (state == OperationState.TIMEOUT) { val ke = KyuubiSQLException(s"Timeout operating $opType: $errMsg") setOperationException(ke) throw ke } else if (isTerminalState(state)) { - setOperationException(KyuubiSQLException(errMsg)) - warn(s"Ignore exception in terminal state with $statementId: $errMsg") + val ke = KyuubiSQLException(errMsg) + setOperationException(ke) + throw ke } else { error(s"Error operating $opType: $errMsg", e) val ke = KyuubiSQLException(s"Error operating $opType: $errMsg", e) @@ -200,7 +201,7 @@ abstract class SparkOperation(session: Session) } override protected def afterRun(): Unit = { - state.synchronized { + withLockRequired { if (!isTerminalState(state)) { setState(OperationState.FINISHED) } @@ -232,10 +233,12 @@ abstract class SparkOperation(session: Session) resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = - withLocalProperties { - var resultRowSet: TRowSet = null - try { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { + var resultRowSet: TRowSet = null + try { + withLocalProperties { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) @@ -260,10 +263,14 @@ abstract class SparkOperation(session: Session) getProtocolVersion) } resultRowSet.setStartRowOffset(iter.getPosition) - } catch onError(cancel = true) + } + } catch onError(cancel = true) - resultRowSet - } + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(resultRowSet) + resp.setHasMoreRows(false) + resp + } override def shouldRunAsync: Boolean = false diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkSQLOperationManager.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkSQLOperationManager.scala index 8fd58b33875..ab082874630 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkSQLOperationManager.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkSQLOperationManager.scala @@ -26,7 +26,7 @@ import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_OPERATION_HANDLE_KEY import org.apache.kyuubi.engine.spark.repl.KyuubiSparkILoop import org.apache.kyuubi.engine.spark.session.SparkSessionImpl -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.{NoneMode, Operation, OperationHandle, OperationManager, PlanOnlyMode} import org.apache.kyuubi.session.{Session, SessionHandle} @@ -106,18 +106,18 @@ class SparkSQLOperationManager private (name: String) extends OperationManager(n opHandle) } case mode => - new PlanOnlyStatement(session, statement, mode) + new PlanOnlyStatement(session, statement, mode, opHandle) } case OperationLanguages.SCALA => val repl = sessionToRepl.getOrElseUpdate(session.handle, KyuubiSparkILoop(spark)) - new ExecuteScala(session, repl, statement, runAsync, queryTimeout) + new ExecuteScala(session, repl, statement, runAsync, queryTimeout, opHandle) case OperationLanguages.PYTHON => try { ExecutePython.init() val worker = sessionToPythonProcess.getOrElseUpdate( session.handle, 
ExecutePython.createSessionPythonWorker(spark, session)) - new ExecutePython(session, statement, runAsync, queryTimeout, worker) + new ExecutePython(session, statement, runAsync, queryTimeout, worker, opHandle) } catch { case e: Throwable => spark.conf.set(OPERATION_LANGUAGE.key, OperationLanguages.SQL.toString) @@ -179,7 +179,7 @@ class SparkSQLOperationManager private (name: String) extends OperationManager(n tableTypes: java.util.List[String]): Operation = { val tTypes = if (tableTypes == null || tableTypes.isEmpty) { - SparkCatalogShim.sparkTableTypes + SparkCatalogUtils.sparkTableTypes } else { tableTypes.asScala.toSet } @@ -231,6 +231,6 @@ class SparkSQLOperationManager private (name: String) extends OperationManager(n } override def getQueryId(operation: Operation): String = { - throw KyuubiSQLException.featureNotSupported() + operation.getHandle.identifier.toString } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSQLSessionManager.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSQLSessionManager.scala index 677af9a0394..79f38ce35a4 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSQLSessionManager.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSQLSessionManager.scala @@ -20,6 +20,7 @@ package org.apache.kyuubi.engine.spark.session import java.util.concurrent.{ScheduledExecutorService, TimeUnit} import org.apache.hive.service.rpc.thrift.TProtocolVersion +import org.apache.spark.api.python.KyuubiPythonGatewayServer import org.apache.spark.sql.SparkSession import org.apache.kyuubi.KyuubiSQLException @@ -94,6 +95,7 @@ class SparkSQLSessionManager private (name: String, spark: SparkSession) override def stop(): Unit = { super.stop() + KyuubiPythonGatewayServer.shutdown() userIsolatedSparkSessionThread.foreach(_.shutdown()) } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala index 78164ff5fab..8d9012cbdc6 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala @@ -17,6 +17,7 @@ package org.apache.kyuubi.engine.spark.session +import org.apache.commons.lang3.StringUtils import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion} import org.apache.spark.sql.{AnalysisException, SparkSession} @@ -24,11 +25,11 @@ import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY import org.apache.kyuubi.engine.spark.events.SessionEvent import org.apache.kyuubi.engine.spark.operation.SparkSQLOperationManager -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim import org.apache.kyuubi.engine.spark.udf.KDFRegistry +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.events.EventBus import org.apache.kyuubi.operation.{Operation, OperationHandle} -import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} +import org.apache.kyuubi.session._ class SparkSessionImpl( protocol: TProtocolVersion, @@ -54,22 +55,35 @@ class SparkSessionImpl( private val sessionEvent = 
SessionEvent(this) override def open(): Unit = { - normalizedConf.foreach { - case ("use:catalog", catalog) => - try { - SparkCatalogShim().setCurrentCatalog(spark, catalog) - } catch { - case e if e.getMessage.contains("Cannot find catalog plugin class for catalog") => - warn(e.getMessage()) - } - case ("use:database", database) => - try { - SparkCatalogShim().setCurrentDatabase(spark, database) - } catch { - case e - if database == "default" && e.getMessage != null && - e.getMessage.contains("not found") => - } + + val (useCatalogAndDatabaseConf, otherConf) = normalizedConf.partition { case (k, _) => + Array(USE_CATALOG, USE_DATABASE).contains(k) + } + + useCatalogAndDatabaseConf.get(USE_CATALOG).foreach { catalog => + try { + SparkCatalogUtils.setCurrentCatalog(spark, catalog) + } catch { + case e if e.getMessage.contains("Cannot find catalog plugin class for catalog") => + warn(e.getMessage()) + } + } + + useCatalogAndDatabaseConf.get("use:database").foreach { database => + try { + spark.sessionState.catalogManager.setCurrentNamespace(Array(database)) + } catch { + case e + if database == "default" && + StringUtils.containsAny( + e.getMessage, + "not found", + "SCHEMA_NOT_FOUND", + "is not authorized to perform: glue:GetDatabase") => + } + } + + otherConf.foreach { case (key, value) => setModifiableConfig(key, value) } KDFRegistry.registerAll(spark) diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v2_4.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v2_4.scala deleted file mode 100644 index 0f6195acf3f..00000000000 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v2_4.scala +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kyuubi.engine.spark.shim - -import java.util.regex.Pattern - -import org.apache.spark.sql.{Row, SparkSession} -import org.apache.spark.sql.catalyst.TableIdentifier - -class CatalogShim_v2_4 extends SparkCatalogShim { - - override def getCatalogs(spark: SparkSession): Seq[Row] = { - Seq(Row(SparkCatalogShim.SESSION_CATALOG)) - } - - override protected def catalogExists(spark: SparkSession, catalog: String): Boolean = false - - override def setCurrentCatalog(spark: SparkSession, catalog: String): Unit = {} - - override def getCurrentCatalog(spark: SparkSession): Row = { - Row(SparkCatalogShim.SESSION_CATALOG) - } - - override def getSchemas( - spark: SparkSession, - catalogName: String, - schemaPattern: String): Seq[Row] = { - (spark.sessionState.catalog.listDatabases(schemaPattern) ++ - getGlobalTempViewManager(spark, schemaPattern)).map(Row(_, SparkCatalogShim.SESSION_CATALOG)) - } - - def setCurrentDatabase(spark: SparkSession, databaseName: String): Unit = { - spark.sessionState.catalog.setCurrentDatabase(databaseName) - } - - def getCurrentDatabase(spark: SparkSession): Row = { - Row(spark.sessionState.catalog.getCurrentDatabase) - } - - override protected def getGlobalTempViewManager( - spark: SparkSession, - schemaPattern: String): Seq[String] = { - val database = spark.sharedState.globalTempViewManager.database - Option(database).filter(_.matches(schemaPattern)).toSeq - } - - override def getCatalogTablesOrViews( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - tableTypes: Set[String]): Seq[Row] = { - val catalog = spark.sessionState.catalog - val databases = catalog.listDatabases(schemaPattern) - - databases.flatMap { db => - val identifiers = catalog.listTables(db, tablePattern, includeLocalTempViews = false) - catalog.getTablesByName(identifiers) - .filter(t => matched(tableTypes, t.tableType.name)).map { t => - val typ = if (t.tableType.name == "VIEW") "VIEW" else "TABLE" - Row( - catalogName, - t.database, - t.identifier.table, - typ, - t.comment.getOrElse(""), - null, - null, - null, - null, - null) - } - } - } - - override def getTempViews( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String): Seq[Row] = { - val views = getViews(spark, schemaPattern, tablePattern) - views.map { ident => - Row(catalogName, ident.database.orNull, ident.table, "VIEW", "", null, null, null, null, null) - } - } - - override protected def getViews( - spark: SparkSession, - schemaPattern: String, - tablePattern: String): Seq[TableIdentifier] = { - val db = getGlobalTempViewManager(spark, schemaPattern) - if (db.nonEmpty) { - spark.sessionState.catalog.listTables(db.head, tablePattern) - } else { - spark.sessionState.catalog.listLocalTempViews(tablePattern) - } - } - - override def getColumns( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - columnPattern: String): Seq[Row] = { - - val cp = columnPattern.r.pattern - val byCatalog = getColumnsByCatalog(spark, catalogName, schemaPattern, tablePattern, cp) - val byGlobalTmpDB = getColumnsByGlobalTempViewManager(spark, schemaPattern, tablePattern, cp) - val byLocalTmp = getColumnsByLocalTempViews(spark, tablePattern, cp) - - byCatalog ++ byGlobalTmpDB ++ byLocalTmp - } - - protected def getColumnsByCatalog( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - columnPattern: Pattern): Seq[Row] = { - val catalog = spark.sessionState.catalog - - val 
databases = catalog.listDatabases(schemaPattern) - - databases.flatMap { db => - val identifiers = catalog.listTables(db, tablePattern, includeLocalTempViews = true) - catalog.getTablesByName(identifiers).flatMap { t => - t.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) - .map { case (f, i) => toColumnResult(catalogName, t.database, t.identifier.table, f, i) } - } - } - } - - protected def getColumnsByGlobalTempViewManager( - spark: SparkSession, - schemaPattern: String, - tablePattern: String, - columnPattern: Pattern): Seq[Row] = { - val catalog = spark.sessionState.catalog - - getGlobalTempViewManager(spark, schemaPattern).flatMap { globalTmpDb => - catalog.globalTempViewManager.listViewNames(tablePattern).flatMap { v => - catalog.globalTempViewManager.get(v).map { plan => - plan.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) - .map { case (f, i) => - toColumnResult(SparkCatalogShim.SESSION_CATALOG, globalTmpDb, v, f, i) - } - } - }.flatten - } - } - - protected def getColumnsByLocalTempViews( - spark: SparkSession, - tablePattern: String, - columnPattern: Pattern): Seq[Row] = { - val catalog = spark.sessionState.catalog - - catalog.listLocalTempViews(tablePattern) - .map(v => (v, catalog.getTempView(v.table).get)) - .flatMap { case (v, plan) => - plan.schema.zipWithIndex - .filter(f => columnPattern.matcher(f._1.name).matches()) - .map { case (f, i) => - toColumnResult(SparkCatalogShim.SESSION_CATALOG, null, v.table, f, i) - } - } - } -} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v3_0.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v3_0.scala deleted file mode 100644 index a663ba63638..00000000000 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/CatalogShim_v3_0.scala +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kyuubi.engine.spark.shim - -import java.util.regex.Pattern - -import org.apache.spark.sql.{Row, SparkSession} -import org.apache.spark.sql.connector.catalog.{CatalogExtension, CatalogPlugin, SupportsNamespaces, TableCatalog} - -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim.SESSION_CATALOG - -class CatalogShim_v3_0 extends CatalogShim_v2_4 { - - override def getCatalogs(spark: SparkSession): Seq[Row] = { - - // A [[CatalogManager]] is session unique - val catalogMgr = spark.sessionState.catalogManager - // get the custom v2 session catalog or default spark_catalog - val sessionCatalog = invoke(catalogMgr, "v2SessionCatalog") - val defaultCatalog = catalogMgr.currentCatalog - - val defaults = Seq(sessionCatalog, defaultCatalog).distinct - .map(invoke(_, "name").asInstanceOf[String]) - val catalogs = getField(catalogMgr, "catalogs") - .asInstanceOf[scala.collection.Map[String, _]] - (catalogs.keys ++: defaults).distinct.map(Row(_)) - } - - private def getCatalog(spark: SparkSession, catalogName: String): CatalogPlugin = { - val catalogManager = spark.sessionState.catalogManager - if (catalogName == null || catalogName.isEmpty) { - catalogManager.currentCatalog - } else { - catalogManager.catalog(catalogName) - } - } - - override def catalogExists(spark: SparkSession, catalog: String): Boolean = { - spark.sessionState.catalogManager.isCatalogRegistered(catalog) - } - - override def setCurrentCatalog(spark: SparkSession, catalog: String): Unit = { - // SPARK-36841(3.3.0) Ensure setCurrentCatalog method catalog must exist - if (spark.sessionState.catalogManager.isCatalogRegistered(catalog)) { - spark.sessionState.catalogManager.setCurrentCatalog(catalog) - } else { - throw new IllegalArgumentException(s"Cannot find catalog plugin class for catalog '$catalog'") - } - } - - override def getCurrentCatalog(spark: SparkSession): Row = { - Row(spark.sessionState.catalogManager.currentCatalog.name()) - } - - private def listAllNamespaces( - catalog: SupportsNamespaces, - namespaces: Array[Array[String]]): Array[Array[String]] = { - val children = namespaces.flatMap { ns => - catalog.listNamespaces(ns) - } - if (children.isEmpty) { - namespaces - } else { - namespaces ++: listAllNamespaces(catalog, children) - } - } - - private def listAllNamespaces(catalog: CatalogPlugin): Array[Array[String]] = { - catalog match { - case catalog: CatalogExtension => - // DSv2 does not support pass schemaPattern transparently - catalog.defaultNamespace() +: catalog.listNamespaces(Array()) - case catalog: SupportsNamespaces => - val rootSchema = catalog.listNamespaces() - val allSchemas = listAllNamespaces(catalog, rootSchema) - allSchemas - } - } - - /** - * Forked from Apache Spark's org.apache.spark.sql.connector.catalog.CatalogV2Implicits - */ - private def quoteIfNeeded(part: String): String = { - if (part.contains(".") || part.contains("`")) { - s"`${part.replace("`", "``")}`" - } else { - part - } - } - - private def listNamespacesWithPattern( - catalog: CatalogPlugin, - schemaPattern: String): Array[Array[String]] = { - val p = schemaPattern.r.pattern - listAllNamespaces(catalog).filter { ns => - val quoted = ns.map(quoteIfNeeded).mkString(".") - p.matcher(quoted).matches() - }.distinct - } - - private def getSchemasWithPattern(catalog: CatalogPlugin, schemaPattern: String): Seq[String] = { - val p = schemaPattern.r.pattern - listAllNamespaces(catalog).flatMap { ns => - val quoted = ns.map(quoteIfNeeded).mkString(".") - if (p.matcher(quoted).matches()) { - Some(quoted) - 
} else { - None - } - }.distinct - } - - override def getSchemas( - spark: SparkSession, - catalogName: String, - schemaPattern: String): Seq[Row] = { - if (catalogName == SparkCatalogShim.SESSION_CATALOG) { - super.getSchemas(spark, catalogName, schemaPattern) - } else { - val catalog = getCatalog(spark, catalogName) - getSchemasWithPattern(catalog, schemaPattern).map(Row(_, catalog.name)) - } - } - - override def setCurrentDatabase(spark: SparkSession, databaseName: String): Unit = { - spark.sessionState.catalogManager.setCurrentNamespace(Array(databaseName)) - } - - override def getCurrentDatabase(spark: SparkSession): Row = { - Row(spark.sessionState.catalogManager.currentNamespace.map(quoteIfNeeded).mkString(".")) - } - - override def getCatalogTablesOrViews( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - tableTypes: Set[String]): Seq[Row] = { - val catalog = getCatalog(spark, catalogName) - val namespaces = listNamespacesWithPattern(catalog, schemaPattern) - catalog match { - case builtin if builtin.name() == SESSION_CATALOG => - super.getCatalogTablesOrViews( - spark, - SESSION_CATALOG, - schemaPattern, - tablePattern, - tableTypes) - case tc: TableCatalog => - val tp = tablePattern.r.pattern - val identifiers = namespaces.flatMap { ns => - tc.listTables(ns).filter(i => tp.matcher(quoteIfNeeded(i.name())).matches()) - } - identifiers.map { ident => - val table = tc.loadTable(ident) - // TODO: restore view type for session catalog - val comment = table.properties().getOrDefault(TableCatalog.PROP_COMMENT, "") - val schema = ident.namespace().map(quoteIfNeeded).mkString(".") - val tableName = quoteIfNeeded(ident.name()) - Row(catalog.name(), schema, tableName, "TABLE", comment, null, null, null, null, null) - } - case _ => Seq.empty[Row] - } - } - - override protected def getColumnsByCatalog( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - columnPattern: Pattern): Seq[Row] = { - val catalog = getCatalog(spark, catalogName) - - catalog match { - case tc: TableCatalog => - val namespaces = listNamespacesWithPattern(catalog, schemaPattern) - val tp = tablePattern.r.pattern - val identifiers = namespaces.flatMap { ns => - tc.listTables(ns).filter(i => tp.matcher(quoteIfNeeded(i.name())).matches()) - } - identifiers.flatMap { ident => - val table = tc.loadTable(ident) - val namespace = ident.namespace().map(quoteIfNeeded).mkString(".") - val tableName = quoteIfNeeded(ident.name()) - - table.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) - .map { case (f, i) => toColumnResult(tc.name(), namespace, tableName, f, i) } - } - - case builtin if builtin.name() == SESSION_CATALOG => - super.getColumnsByCatalog( - spark, - SESSION_CATALOG, - schemaPattern, - tablePattern, - columnPattern) - } - } -} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/SparkCatalogShim.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/SparkCatalogShim.scala deleted file mode 100644 index bc5792823f7..00000000000 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/shim/SparkCatalogShim.scala +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.engine.spark.shim - -import org.apache.spark.sql.{Row, SparkSession} -import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.types.StructField - -import org.apache.kyuubi.Logging -import org.apache.kyuubi.engine.spark.KyuubiSparkUtil.sparkMajorMinorVersion -import org.apache.kyuubi.engine.spark.schema.SchemaHelper - -/** - * A shim that defines the interface interact with Spark's catalogs - */ -trait SparkCatalogShim extends Logging { - - // /////////////////////////////////////////////////////////////////////////////////////////////// - // Catalog // - // /////////////////////////////////////////////////////////////////////////////////////////////// - - /** - * Get all register catalogs in Spark's `CatalogManager` - */ - def getCatalogs(spark: SparkSession): Seq[Row] - - protected def catalogExists(spark: SparkSession, catalog: String): Boolean - - def setCurrentCatalog(spark: SparkSession, catalog: String): Unit - - def getCurrentCatalog(spark: SparkSession): Row - - // /////////////////////////////////////////////////////////////////////////////////////////////// - // Schema // - // /////////////////////////////////////////////////////////////////////////////////////////////// - - /** - * a list of [[Row]]s, with 2 fields `schemaName: String, catalogName: String` - */ - def getSchemas(spark: SparkSession, catalogName: String, schemaPattern: String): Seq[Row] - - def setCurrentDatabase(spark: SparkSession, databaseName: String): Unit - - def getCurrentDatabase(spark: SparkSession): Row - - protected def getGlobalTempViewManager(spark: SparkSession, schemaPattern: String): Seq[String] - - // /////////////////////////////////////////////////////////////////////////////////////////////// - // Table & View // - // /////////////////////////////////////////////////////////////////////////////////////////////// - - def getCatalogTablesOrViews( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - tableTypes: Set[String]): Seq[Row] - - def getTempViews( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String): Seq[Row] - - protected def getViews( - spark: SparkSession, - schemaPattern: String, - tablePattern: String): Seq[TableIdentifier] - - // /////////////////////////////////////////////////////////////////////////////////////////////// - // Columns // - // /////////////////////////////////////////////////////////////////////////////////////////////// - - def getColumns( - spark: SparkSession, - catalogName: String, - schemaPattern: String, - tablePattern: String, - columnPattern: String): Seq[Row] - - protected def toColumnResult( - catalog: String, - db: String, - table: String, - col: StructField, - pos: Int): Row = { - // format: off - Row( - catalog, // TABLE_CAT - db, // TABLE_SCHEM - table, // TABLE_NAME - col.name, // COLUMN_NAME - 
SchemaHelper.toJavaSQLType(col.dataType), // DATA_TYPE - col.dataType.sql, // TYPE_NAME - SchemaHelper.getColumnSize(col.dataType).orNull, // COLUMN_SIZE - null, // BUFFER_LENGTH - SchemaHelper.getDecimalDigits(col.dataType).orNull, // DECIMAL_DIGITS - SchemaHelper.getNumPrecRadix(col.dataType).orNull, // NUM_PREC_RADIX - if (col.nullable) 1 else 0, // NULLABLE - col.getComment().getOrElse(""), // REMARKS - null, // COLUMN_DEF - null, // SQL_DATA_TYPE - null, // SQL_DATETIME_SUB - null, // CHAR_OCTET_LENGTH - pos, // ORDINAL_POSITION - "YES", // IS_NULLABLE - null, // SCOPE_CATALOG - null, // SCOPE_SCHEMA - null, // SCOPE_TABLE - null, // SOURCE_DATA_TYPE - "NO" // IS_AUTO_INCREMENT - ) - // format: on - } - - // /////////////////////////////////////////////////////////////////////////////////////////////// - // Miscellaneous // - // /////////////////////////////////////////////////////////////////////////////////////////////// - - protected def invoke( - obj: Any, - methodName: String, - args: (Class[_], AnyRef)*): Any = { - val (types, values) = args.unzip - val method = obj.getClass.getMethod(methodName, types: _*) - method.setAccessible(true) - method.invoke(obj, values.toSeq: _*) - } - - protected def invoke( - clazz: Class[_], - obj: AnyRef, - methodName: String, - args: (Class[_], AnyRef)*): AnyRef = { - val (types, values) = args.unzip - val method = clazz.getMethod(methodName, types: _*) - method.setAccessible(true) - method.invoke(obj, values.toSeq: _*) - } - - protected def getField(o: Any, fieldName: String): Any = { - val field = o.getClass.getDeclaredField(fieldName) - field.setAccessible(true) - field.get(o) - } - - protected def matched(tableTypes: Set[String], tableType: String): Boolean = { - val typ = if (tableType.equalsIgnoreCase("VIEW")) "VIEW" else "TABLE" - tableTypes.exists(typ.equalsIgnoreCase) - } - -} - -object SparkCatalogShim { - def apply(): SparkCatalogShim = { - sparkMajorMinorVersion match { - case (3, _) => new CatalogShim_v3_0 - case (2, _) => new CatalogShim_v2_4 - case _ => - throw new IllegalArgumentException(s"Not Support spark version $sparkMajorMinorVersion") - } - } - - val SESSION_CATALOG: String = "spark_catalog" - - val sparkTableTypes = Set("VIEW", "TABLE") -} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KDFRegistry.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KDFRegistry.scala index f4612a3d0a3..a2d50d1515b 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KDFRegistry.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KDFRegistry.scala @@ -25,7 +25,7 @@ import org.apache.spark.sql.expressions.UserDefinedFunction import org.apache.spark.sql.functions.udf import org.apache.kyuubi.{KYUUBI_VERSION, Utils} -import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_URL, KYUUBI_SESSION_USER_KEY} object KDFRegistry { @@ -73,6 +73,16 @@ object KDFRegistry { "string", "1.4.0") + val engine_url: KyuubiDefinedFunction = create( + "engine_url", + udf { () => + Option(TaskContext.get()).map(_.getLocalProperty(KYUUBI_ENGINE_URL)) + .getOrElse(throw new RuntimeException("Unable to get engine url")) + }, + "Return the engine url for the associated query engine", + "string", + "1.8.0") + def create( name: String, udf: UserDefinedFunction, diff --git 
a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunction.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunction.scala index 30228bf7264..6bc2e3ddb3e 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunction.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunction.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.engine.spark.udf import org.apache.spark.sql.expressions.UserDefinedFunction /** - * A wrapper for Spark' [[UserDefinedFunction]] + * A wrapper for Spark's [[UserDefinedFunction]] * * @param name function name * @param udf user-defined function diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/util/SparkCatalogUtils.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/util/SparkCatalogUtils.scala new file mode 100644 index 00000000000..18a14494e85 --- /dev/null +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/util/SparkCatalogUtils.scala @@ -0,0 +1,373 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.spark.util + +import java.util.regex.Pattern + +import org.apache.commons.lang3.StringUtils +import org.apache.spark.sql.{Row, SparkSession} +import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.connector.catalog.{CatalogExtension, CatalogPlugin, SupportsNamespaces, TableCatalog} +import org.apache.spark.sql.types.StructField + +import org.apache.kyuubi.Logging +import org.apache.kyuubi.engine.spark.schema.SchemaHelper +import org.apache.kyuubi.util.reflect.ReflectUtils._ + +/** + * A shim that defines the interface interact with Spark's catalogs + */ +object SparkCatalogUtils extends Logging { + + private val VIEW = "VIEW" + private val TABLE = "TABLE" + + val SESSION_CATALOG: String = "spark_catalog" + val sparkTableTypes: Set[String] = Set(VIEW, TABLE) + + // /////////////////////////////////////////////////////////////////////////////////////////////// + // Catalog // + // /////////////////////////////////////////////////////////////////////////////////////////////// + + /** + * Get all register catalogs in Spark's `CatalogManager` + */ + def getCatalogs(spark: SparkSession): Seq[Row] = { + + // A [[CatalogManager]] is session unique + val catalogMgr = spark.sessionState.catalogManager + // get the custom v2 session catalog or default spark_catalog + val sessionCatalog = invokeAs[AnyRef](catalogMgr, "v2SessionCatalog") + val defaultCatalog = catalogMgr.currentCatalog + + val defaults = Seq(sessionCatalog, defaultCatalog).distinct.map(invokeAs[String](_, "name")) + val catalogs = getField[scala.collection.Map[String, _]](catalogMgr, "catalogs") + (catalogs.keys ++: defaults).distinct.map(Row(_)) + } + + def getCatalog(spark: SparkSession, catalogName: String): CatalogPlugin = { + val catalogManager = spark.sessionState.catalogManager + if (StringUtils.isBlank(catalogName)) { + catalogManager.currentCatalog + } else { + catalogManager.catalog(catalogName) + } + } + + def setCurrentCatalog(spark: SparkSession, catalog: String): Unit = { + // SPARK-36841(3.3.0) Ensure setCurrentCatalog method catalog must exist + if (spark.sessionState.catalogManager.isCatalogRegistered(catalog)) { + spark.sessionState.catalogManager.setCurrentCatalog(catalog) + } else { + throw new IllegalArgumentException(s"Cannot find catalog plugin class for catalog '$catalog'") + } + } + + // /////////////////////////////////////////////////////////////////////////////////////////////// + // Schema // + // /////////////////////////////////////////////////////////////////////////////////////////////// + + /** + * a list of [[Row]]s, with 2 fields `schemaName: String, catalogName: String` + */ + def getSchemas( + spark: SparkSession, + catalogName: String, + schemaPattern: String): Seq[Row] = { + if (catalogName == SparkCatalogUtils.SESSION_CATALOG) { + (spark.sessionState.catalog.listDatabases(schemaPattern) ++ + getGlobalTempViewManager(spark, schemaPattern)) + .map(Row(_, SparkCatalogUtils.SESSION_CATALOG)) + } else { + val catalog = getCatalog(spark, catalogName) + getSchemasWithPattern(catalog, schemaPattern).map(Row(_, catalog.name)) + } + } + + private def getGlobalTempViewManager( + spark: SparkSession, + schemaPattern: String): Seq[String] = { + val database = spark.sharedState.globalTempViewManager.database + Option(database).filter(_.matches(schemaPattern)).toSeq + } + + private def listAllNamespaces( + catalog: SupportsNamespaces, + namespaces: Array[Array[String]]): Array[Array[String]] = { + val children = namespaces.flatMap { ns => + 
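+      // Editor's annotation: this recursion expands one namespace level per call. Each pass
+      // lists the direct children of the current frontier and recurses until no deeper
+      // namespaces remain, so a nested catalog with namespaces a, a.b and a.b.c ends up
+      // contributing Array("a"), Array("a", "b") and Array("a", "b", "c") (illustrative only).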
catalog.listNamespaces(ns) + } + if (children.isEmpty) { + namespaces + } else { + namespaces ++: listAllNamespaces(catalog, children) + } + } + + private def listAllNamespaces(catalog: CatalogPlugin): Array[Array[String]] = { + catalog match { + case catalog: CatalogExtension => + // DSv2 does not support pass schemaPattern transparently + catalog.defaultNamespace() +: catalog.listNamespaces(Array()) + case catalog: SupportsNamespaces => + val rootSchema = catalog.listNamespaces() + val allSchemas = listAllNamespaces(catalog, rootSchema) + allSchemas + } + } + + private def listNamespacesWithPattern( + catalog: CatalogPlugin, + schemaPattern: String): Array[Array[String]] = { + listAllNamespaces(catalog).filter { ns => + val quoted = ns.map(quoteIfNeeded).mkString(".") + schemaPattern.r.pattern.matcher(quoted).matches() + }.map(_.toList).toList.distinct.map(_.toArray).toArray + } + + private def getSchemasWithPattern(catalog: CatalogPlugin, schemaPattern: String): Seq[String] = { + val p = schemaPattern.r.pattern + listAllNamespaces(catalog).flatMap { ns => + val quoted = ns.map(quoteIfNeeded).mkString(".") + if (p.matcher(quoted).matches()) Some(quoted) else None + }.distinct + } + + // /////////////////////////////////////////////////////////////////////////////////////////////// + // Table & View // + // /////////////////////////////////////////////////////////////////////////////////////////////// + + def getCatalogTablesOrViews( + spark: SparkSession, + catalogName: String, + schemaPattern: String, + tablePattern: String, + tableTypes: Set[String], + ignoreTableProperties: Boolean = false): Seq[Row] = { + val catalog = getCatalog(spark, catalogName) + val namespaces = listNamespacesWithPattern(catalog, schemaPattern) + catalog match { + case builtin if builtin.name() == SESSION_CATALOG => + val catalog = spark.sessionState.catalog + val databases = catalog.listDatabases(schemaPattern) + + def isMatchedTableType(tableTypes: Set[String], tableType: String): Boolean = { + val typ = if (tableType.equalsIgnoreCase(VIEW)) VIEW else TABLE + tableTypes.exists(typ.equalsIgnoreCase) + } + + databases.flatMap { db => + val identifiers = catalog.listTables(db, tablePattern, includeLocalTempViews = false) + catalog.getTablesByName(identifiers) + .filter(t => isMatchedTableType(tableTypes, t.tableType.name)).map { t => + val typ = if (t.tableType.name == VIEW) VIEW else TABLE + Row( + catalogName, + t.database, + t.identifier.table, + typ, + t.comment.getOrElse(""), + null, + null, + null, + null, + null) + } + } + case tc: TableCatalog => + val tp = tablePattern.r.pattern + val identifiers = namespaces.flatMap { ns => + tc.listTables(ns).filter(i => tp.matcher(quoteIfNeeded(i.name())).matches()) + } + identifiers.map { ident => + // TODO: restore view type for session catalog + val comment = if (ignoreTableProperties) "" + else { // load table is a time consuming operation + tc.loadTable(ident).properties().getOrDefault(TableCatalog.PROP_COMMENT, "") + } + val schema = ident.namespace().map(quoteIfNeeded).mkString(".") + val tableName = quoteIfNeeded(ident.name()) + Row(catalog.name(), schema, tableName, TABLE, comment, null, null, null, null, null) + } + case _ => Seq.empty[Row] + } + } + + private def getColumnsByCatalog( + spark: SparkSession, + catalogName: String, + schemaPattern: String, + tablePattern: String, + columnPattern: Pattern): Seq[Row] = { + val catalog = getCatalog(spark, catalogName) + + catalog match { + case tc: TableCatalog => + val namespaces = 
listNamespacesWithPattern(catalog, schemaPattern) + val tp = tablePattern.r.pattern + val identifiers = namespaces.flatMap { ns => + tc.listTables(ns).filter(i => tp.matcher(quoteIfNeeded(i.name())).matches()) + } + identifiers.flatMap { ident => + val table = tc.loadTable(ident) + val namespace = ident.namespace().map(quoteIfNeeded).mkString(".") + val tableName = quoteIfNeeded(ident.name()) + + table.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) + .map { case (f, i) => toColumnResult(tc.name(), namespace, tableName, f, i) } + } + + case builtin if builtin.name() == SESSION_CATALOG => + val catalog = spark.sessionState.catalog + val databases = catalog.listDatabases(schemaPattern) + databases.flatMap { db => + val identifiers = catalog.listTables(db, tablePattern, includeLocalTempViews = true) + catalog.getTablesByName(identifiers).flatMap { t => + t.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) + .map { case (f, i) => + toColumnResult(catalogName, t.database, t.identifier.table, f, i) + } + } + } + } + } + + def getTempViews( + spark: SparkSession, + catalogName: String, + schemaPattern: String, + tablePattern: String): Seq[Row] = { + val views = getViews(spark, schemaPattern, tablePattern) + views.map { ident => + Row(catalogName, ident.database.orNull, ident.table, VIEW, "", null, null, null, null, null) + } + } + + private def getViews( + spark: SparkSession, + schemaPattern: String, + tablePattern: String): Seq[TableIdentifier] = { + val db = getGlobalTempViewManager(spark, schemaPattern) + if (db.nonEmpty) { + spark.sessionState.catalog.listTables(db.head, tablePattern) + } else { + spark.sessionState.catalog.listLocalTempViews(tablePattern) + } + } + + // /////////////////////////////////////////////////////////////////////////////////////////////// + // Columns // + // /////////////////////////////////////////////////////////////////////////////////////////////// + + def getColumns( + spark: SparkSession, + catalogName: String, + schemaPattern: String, + tablePattern: String, + columnPattern: String): Seq[Row] = { + + val cp = columnPattern.r.pattern + val byCatalog = getColumnsByCatalog(spark, catalogName, schemaPattern, tablePattern, cp) + val byGlobalTmpDB = getColumnsByGlobalTempViewManager(spark, schemaPattern, tablePattern, cp) + val byLocalTmp = getColumnsByLocalTempViews(spark, tablePattern, cp) + + byCatalog ++ byGlobalTmpDB ++ byLocalTmp + } + + private def getColumnsByGlobalTempViewManager( + spark: SparkSession, + schemaPattern: String, + tablePattern: String, + columnPattern: Pattern): Seq[Row] = { + val catalog = spark.sessionState.catalog + + getGlobalTempViewManager(spark, schemaPattern).flatMap { globalTmpDb => + catalog.globalTempViewManager.listViewNames(tablePattern).flatMap { v => + catalog.globalTempViewManager.get(v).map { plan => + plan.schema.zipWithIndex.filter(f => columnPattern.matcher(f._1.name).matches()) + .map { case (f, i) => + toColumnResult(SparkCatalogUtils.SESSION_CATALOG, globalTmpDb, v, f, i) + } + } + }.flatten + } + } + + private def getColumnsByLocalTempViews( + spark: SparkSession, + tablePattern: String, + columnPattern: Pattern): Seq[Row] = { + val catalog = spark.sessionState.catalog + + catalog.listLocalTempViews(tablePattern) + .map(v => (v, catalog.getTempView(v.table).get)) + .flatMap { case (v, plan) => + plan.schema.zipWithIndex + .filter(f => columnPattern.matcher(f._1.name).matches()) + .map { case (f, i) => + toColumnResult(SparkCatalogUtils.SESSION_CATALOG, null, 
v.table, f, i) + } + } + } + + private def toColumnResult( + catalog: String, + db: String, + table: String, + col: StructField, + pos: Int): Row = { + // format: off + Row( + catalog, // TABLE_CAT + db, // TABLE_SCHEM + table, // TABLE_NAME + col.name, // COLUMN_NAME + SchemaHelper.toJavaSQLType(col.dataType), // DATA_TYPE + col.dataType.sql, // TYPE_NAME + SchemaHelper.getColumnSize(col.dataType).orNull, // COLUMN_SIZE + null, // BUFFER_LENGTH + SchemaHelper.getDecimalDigits(col.dataType).orNull, // DECIMAL_DIGITS + SchemaHelper.getNumPrecRadix(col.dataType).orNull, // NUM_PREC_RADIX + if (col.nullable) 1 else 0, // NULLABLE + col.getComment().getOrElse(""), // REMARKS + null, // COLUMN_DEF + null, // SQL_DATA_TYPE + null, // SQL_DATETIME_SUB + null, // CHAR_OCTET_LENGTH + pos, // ORDINAL_POSITION + "YES", // IS_NULLABLE + null, // SCOPE_CATALOG + null, // SCOPE_SCHEMA + null, // SCOPE_TABLE + null, // SOURCE_DATA_TYPE + "NO" // IS_AUTO_INCREMENT + ) + // format: on + } + + /** + * Forked from Apache Spark's [[org.apache.spark.sql.catalyst.util.quoteIfNeeded]] + */ + def quoteIfNeeded(part: String): String = { + if (part.matches("[a-zA-Z0-9_]+") && !part.matches("\\d+")) { + part + } else { + s"`${part.replace("`", "``")}`" + } + } +} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/api/python/KyuubiPythonGatewayServer.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/api/python/KyuubiPythonGatewayServer.scala index 7e15ffe05a6..8cf8d685c86 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/api/python/KyuubiPythonGatewayServer.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/api/python/KyuubiPythonGatewayServer.scala @@ -30,10 +30,12 @@ object KyuubiPythonGatewayServer extends Logging { val CONNECTION_FILE_PATH = Utils.createTempDir() + "/connection.info" - def start(): Unit = { + private var gatewayServer: Py4JServer = _ + + def start(): Unit = synchronized { val sparkConf = new SparkConf() - val gatewayServer: Py4JServer = new Py4JServer(sparkConf) + gatewayServer = new Py4JServer(sparkConf) gatewayServer.start() val boundPort: Int = gatewayServer.getListeningPort @@ -65,4 +67,11 @@ object KyuubiPythonGatewayServer extends Logging { System.exit(1) } } + + def shutdown(): Unit = synchronized { + if (gatewayServer != null) { + logInfo("shutting down the python gateway server.") + gatewayServer.shutdown() + } + } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SQLOperationListener.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SQLOperationListener.scala index 00bcf591389..5e7e33712ec 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SQLOperationListener.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SQLOperationListener.scala @@ -44,8 +44,10 @@ class SQLOperationListener( spark: SparkSession) extends StatsReportListener with Logging { private val operationId: String = operation.getHandle.identifier.toString + private lazy val activeJobs = new ConcurrentHashMap[Int, JobInfo]() private lazy val activeStages = new ConcurrentHashMap[StageAttempt, StageInfo]() + private var executionId: Option[Long] = None private val conf: KyuubiConf = operation.getSession.sessionManager.getConf private lazy val consoleProgressBar = @@ -124,10 +126,10 @@ class SQLOperationListener( val stageInfo = stageSubmitted.stageInfo val stageId = stageInfo.stageId 
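    // Editor's annotation: active stages are keyed by (stageId, attemptNumber) through
    // SparkStageAttempt below, so a retried stage attempt is tracked separately from the
    // attempt it replaces.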
val attemptNumber = stageInfo.attemptNumber() - val stageAttempt = StageAttempt(stageId, attemptNumber) + val stageAttempt = SparkStageAttempt(stageId, attemptNumber) activeStages.put( stageAttempt, - new StageInfo(stageId, stageInfo.numTasks)) + new SparkStageInfo(stageId, stageInfo.numTasks)) withOperationLog { info(s"Query [$operationId]: Stage $stageId.$attemptNumber started " + s"with ${stageInfo.numTasks} tasks, ${activeStages.size()} active stages running") @@ -144,6 +146,7 @@ class SQLOperationListener( jobInfo.numCompleteStages += 1 } }) + activeStages.synchronized { if (activeStages.remove(stageAttempt) != null) { withOperationLog(super.onStageCompleted(stageCompleted)) @@ -153,19 +156,19 @@ class SQLOperationListener( override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = activeStages.synchronized { - val stageAttempt = StageAttempt(taskStart.stageId, taskStart.stageAttemptId) + val stageAttempt = SparkStageAttempt(taskStart.stageId, taskStart.stageAttemptId) if (activeStages.containsKey(stageAttempt)) { - activeStages.get(stageAttempt).numActiveTasks += 1 + activeStages.get(stageAttempt).numActiveTasks.getAndIncrement() super.onTaskStart(taskStart) } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = activeStages.synchronized { - val stageAttempt = StageAttempt(taskEnd.stageId, taskEnd.stageAttemptId) + val stageAttempt = SparkStageAttempt(taskEnd.stageId, taskEnd.stageAttemptId) if (activeStages.containsKey(stageAttempt)) { - activeStages.get(stageAttempt).numActiveTasks -= 1 + activeStages.get(stageAttempt).numActiveTasks.getAndDecrement() if (taskEnd.reason == org.apache.spark.Success) { - activeStages.get(stageAttempt).numCompleteTasks += 1 + activeStages.get(stageAttempt).numCompleteTasks.getAndIncrement() } super.onTaskEnd(taskEnd) } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkConsoleProgressBar.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkConsoleProgressBar.scala index 3fb859617a6..148427214fa 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkConsoleProgressBar.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkConsoleProgressBar.scala @@ -78,7 +78,7 @@ class SparkConsoleProgressBar( * after your last output, keeps overwriting itself to hold in one line. The logging will follow * the progress bar, then progress bar will be showed in next line without overwrite logs. 
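   * A worked example of the arithmetic below (illustrative numbers, not defaults): with a
   * per-stage width `w` of 40 characters and 4 of 10 tasks complete, `percent = 40 * 4 / 10 = 16`,
   * so the bar renders 16 `=` characters, a single `>` at position 16, then spaces.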
*/ - private def show(now: Long, stages: Seq[StageInfo]): Unit = { + private def show(now: Long, stages: Seq[SparkStageInfo]): Unit = { val width = TerminalWidth / stages.size val bar = stages.map { s => // build job log info @@ -94,7 +94,7 @@ class SparkConsoleProgressBar( val w = width - header.length - tailer.length val bar = if (w > 0) { - val percent = w * s.numCompleteTasks / total + val percent = w * s.numCompleteTasks.get / total (0 until w).map { i => if (i < percent) "=" else if (i == percent) ">" else " " }.mkString("") diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkProgressMonitor.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkProgressMonitor.scala index a46cbecc22e..1d9ef53eae9 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkProgressMonitor.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkProgressMonitor.scala @@ -136,12 +136,8 @@ class SparkProgressMonitor(spark: SparkSession, jobGroup: String) { trimmedVName = s.substring(0, COLUMN_1_WIDTH - 2) trimmedVName += ".." } else trimmedVName += " " - val result = new StringBuilder(trimmedVName) val toFill = (spaceRemaining * percent).toInt - for (i <- 0 until toFill) { - result.append(".") - } - result.toString + s"$trimmedVName${"." * toFill}" } private def getCompletedStages: Int = { diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkSQLEngineListener.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkSQLEngineListener.scala index 8e32b53291a..48f157a43d6 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkSQLEngineListener.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkSQLEngineListener.scala @@ -40,9 +40,9 @@ import org.apache.kyuubi.service.{Serverable, ServiceState} class SparkSQLEngineListener(server: Serverable) extends SparkListener with Logging { // the conf of server is null before initialized, use lazy val here - private lazy val deregisterExceptions: Seq[String] = + private lazy val deregisterExceptions: Set[String] = server.getConf.get(ENGINE_DEREGISTER_EXCEPTION_CLASSES) - private lazy val deregisterMessages: Seq[String] = + private lazy val deregisterMessages: Set[String] = server.getConf.get(ENGINE_DEREGISTER_EXCEPTION_MESSAGES) private lazy val deregisterExceptionTTL: Long = server.getConf.get(ENGINE_DEREGISTER_EXCEPTION_TTL) @@ -74,7 +74,7 @@ class SparkSQLEngineListener(server: Serverable) extends SparkListener with Logg case JobFailed(e) if e != null => val cause = findCause(e) var deregisterInfo: Option[String] = None - if (deregisterExceptions.exists(_.equals(cause.getClass.getCanonicalName))) { + if (deregisterExceptions.contains(cause.getClass.getCanonicalName)) { deregisterInfo = Some("Job failed exception class is in the set of " + s"${ENGINE_DEREGISTER_EXCEPTION_CLASSES.key}, deregistering the engine.") } else if (deregisterMessages.exists(stringifyException(cause).contains)) { diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkUtilsHelper.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkUtilsHelper.scala index e2f51e648c0..106be3fc789 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkUtilsHelper.scala +++ 
b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/SparkUtilsHelper.scala @@ -43,4 +43,13 @@ object SparkUtilsHelper extends Logging { def getLocalDir(conf: SparkConf): String = { Utils.getLocalDir(conf) } + + def classesArePresent(className: String): Boolean = { + try { + Utils.classForName(className) + true + } catch { + case _: ClassNotFoundException | _: NoClassDefFoundError => false + } + } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/StageStatus.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/StageStatus.scala index ae9f2ff9d82..1e78e1c2f4a 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/StageStatus.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/kyuubi/StageStatus.scala @@ -17,13 +17,16 @@ package org.apache.spark.kyuubi -case class StageAttempt(stageId: Int, stageAttemptId: Int) { +import java.util.concurrent.atomic.AtomicInteger + +case class SparkStageAttempt(stageId: Int, stageAttemptId: Int) { override def toString: String = s"Stage $stageId (Attempt $stageAttemptId)" } class JobInfo(val numStages: Int, val stageIds: Seq[Int]) { var numCompleteStages = 0 } + class StageInfo(val stageId: Int, val numTasks: Int) { var numActiveTasks = 0 var numCompleteTasks = 0 diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala new file mode 100644 index 00000000000..5c4d7086ff3 --- /dev/null +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.arrow + +import java.io.{ByteArrayInputStream, ByteArrayOutputStream} +import java.lang.{Boolean => JBoolean} +import java.nio.channels.Channels + +import scala.collection.JavaConverters._ +import scala.collection.mutable.ArrayBuffer + +import org.apache.arrow.vector._ +import org.apache.arrow.vector.ipc.{ArrowStreamWriter, ReadChannel, WriteChannel} +import org.apache.arrow.vector.ipc.message.{IpcOption, MessageSerializer} +import org.apache.arrow.vector.types.pojo.{Schema => ArrowSchema} +import org.apache.spark.TaskContext +import org.apache.spark.internal.Logging +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper} +import org.apache.spark.sql.catalyst.expressions.UnsafeRow +import org.apache.spark.sql.execution.CollectLimitExec +import org.apache.spark.sql.types._ +import org.apache.spark.sql.util.ArrowUtils +import org.apache.spark.util.Utils + +import org.apache.kyuubi.util.reflect.DynMethods + +object KyuubiArrowConverters extends SQLConfHelper with Logging { + + type Batch = (Array[Byte], Long) + + /** + * this method is to slice the input Arrow record batch byte array `bytes`, starting from `start` + * and taking `length` number of elements. + */ + def slice( + schema: StructType, + timeZoneId: String, + bytes: Array[Byte], + start: Int, + length: Int): Array[Byte] = { + val in = new ByteArrayInputStream(bytes) + val out = new ByteArrayOutputStream(bytes.length) + + var vectorSchemaRoot: VectorSchemaRoot = null + var slicedVectorSchemaRoot: VectorSchemaRoot = null + + val sliceAllocator = ArrowUtils.rootAllocator.newChildAllocator( + "slice", + 0, + Long.MaxValue) + val arrowSchema = toArrowSchema(schema, timeZoneId, true, false) + vectorSchemaRoot = VectorSchemaRoot.create(arrowSchema, sliceAllocator) + try { + val recordBatch = MessageSerializer.deserializeRecordBatch( + new ReadChannel(Channels.newChannel(in)), + sliceAllocator) + val vectorLoader = new VectorLoader(vectorSchemaRoot) + vectorLoader.load(recordBatch) + recordBatch.close() + slicedVectorSchemaRoot = vectorSchemaRoot.slice(start, length) + + val unloader = new VectorUnloader(slicedVectorSchemaRoot) + val writeChannel = new WriteChannel(Channels.newChannel(out)) + val batch = unloader.getRecordBatch() + MessageSerializer.serialize(writeChannel, batch) + batch.close() + out.toByteArray() + } finally { + in.close() + out.close() + if (vectorSchemaRoot != null) { + vectorSchemaRoot.getFieldVectors.asScala.foreach(_.close()) + vectorSchemaRoot.close() + } + if (slicedVectorSchemaRoot != null) { + slicedVectorSchemaRoot.getFieldVectors.asScala.foreach(_.close()) + slicedVectorSchemaRoot.close() + } + sliceAllocator.close() + } + } + + /** + * Forked from `org.apache.spark.sql.execution.SparkPlan#executeTake()`, the algorithm can be + * summarized in the following steps: + * 1. If the limit specified in the CollectLimitExec object is 0, the function returns an empty + * array of batches. + * 2. Otherwise, execute the child query plan of the CollectLimitExec object to obtain an RDD of + * data to collect. + * 3. Use an iterative approach to collect data in batches until the specified limit is reached. + * In each iteration, it selects a subset of the partitions of the RDD to scan and tries to + * collect data from them. + * 4. For each partition subset, we use the runJob method of the Spark context to execute a + * closure that scans the partition data and converts it to Arrow batches. + * 5. 
Check if the collected data reaches the specified limit. If not, it selects another subset + * of partitions to scan and repeats the process until the limit is reached or all partitions + * have been scanned. + * 6. Return an array of all the collected Arrow batches. + * + * Note that: + * 1. The returned Arrow batches row count >= limit, if the input df has more than the `limit` + * row count + * 2. We don't implement the `takeFromEnd` logical + * + * @return + */ + def takeAsArrowBatches( + collectLimitExec: CollectLimitExec, + maxRecordsPerBatch: Long, + maxEstimatedBatchSize: Long, + timeZoneId: String): Array[Batch] = { + val n = collectLimitExec.limit + val schema = collectLimitExec.schema + if (n == 0) { + new Array[Batch](0) + } else { + val limitScaleUpFactor = Math.max(conf.limitScaleUpFactor, 2) + // TODO: refactor and reuse the code from RDD's take() + val childRDD = collectLimitExec.child.execute() + val buf = new ArrayBuffer[Batch] + var bufferedRowSize = 0L + val totalParts = childRDD.partitions.length + var partsScanned = 0 + while (bufferedRowSize < n && partsScanned < totalParts) { + // The number of partitions to try in this iteration. It is ok for this number to be + // greater than totalParts because we actually cap it at totalParts in runJob. + var numPartsToTry = limitInitialNumPartitions + if (partsScanned > 0) { + // If we didn't find any rows after the previous iteration, multiply by + // limitScaleUpFactor and retry. Otherwise, interpolate the number of partitions we need + // to try, but overestimate it by 50%. We also cap the estimation in the end. + if (buf.isEmpty) { + numPartsToTry = partsScanned * limitScaleUpFactor + } else { + val left = n - bufferedRowSize + // As left > 0, numPartsToTry is always >= 1 + numPartsToTry = Math.ceil(1.5 * left * partsScanned / bufferedRowSize).toInt + numPartsToTry = Math.min(numPartsToTry, partsScanned * limitScaleUpFactor) + } + } + + val partsToScan = + partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts)) + + // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we + // drop Spark-3.1.x support. + val sc = SparkSession.active.sparkContext + val res = sc.runJob( + childRDD, + (it: Iterator[InternalRow]) => { + val batches = toBatchIterator( + it, + schema, + maxRecordsPerBatch, + maxEstimatedBatchSize, + n, + timeZoneId) + batches.map(b => b -> batches.rowCountInLastBatch).toArray + }, + partsToScan) + + var i = 0 + while (bufferedRowSize < n && i < res.length) { + var j = 0 + val batches = res(i) + while (j < batches.length && n > bufferedRowSize) { + val batch = batches(j) + val (_, batchSize) = batch + buf += batch + bufferedRowSize += batchSize + j += 1 + } + i += 1 + } + partsScanned += partsToScan.size + } + + buf.toArray + } + } + + /** + * Spark introduced the config `spark.sql.limit.initialNumPartitions` since 3.4.0. see SPARK-40211 + */ + private def limitInitialNumPartitions: Int = { + conf.getConfString("spark.sql.limit.initialNumPartitions", "1") + .toInt + } + + /** + * Different from [[org.apache.spark.sql.execution.arrow.ArrowConverters.toBatchIterator]], + * each output arrow batch contains this batch row count. 
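+   * A hedged usage sketch (the names and argument values here are illustrative assumptions,
+   * not defaults):
+   * {{{
+   *   val batches: Iterator[Array[Byte]] = KyuubiArrowConverters.toBatchIterator(
+   *     rowIter = internalRowIter,           // an Iterator[InternalRow], e.g. one partition
+   *     schema = structType,
+   *     maxRecordsPerBatch = 10000L,
+   *     maxEstimatedBatchSize = 4L * 1024 * 1024,
+   *     limit = -1L,                         // any negative value means "no limit"
+   *     timeZoneId = "UTC")
+   * }}}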
+ */ + def toBatchIterator( + rowIter: Iterator[InternalRow], + schema: StructType, + maxRecordsPerBatch: Long, + maxEstimatedBatchSize: Long, + limit: Long, + timeZoneId: String): ArrowBatchIterator = { + new ArrowBatchIterator( + rowIter, + schema, + maxRecordsPerBatch, + maxEstimatedBatchSize, + limit, + timeZoneId, + TaskContext.get) + } + + /** + * This class ArrowBatchIterator is derived from + * [[org.apache.spark.sql.execution.arrow.ArrowConverters.ArrowBatchWithSchemaIterator]], + * with two key differences: + * 1. there is no requirement to write the schema at the batch header + * 2. iteration halts when `rowCount` equals `limit` + * Note that `limit < 0` means no limit, and return all rows the in the iterator. + */ + private[sql] class ArrowBatchIterator( + rowIter: Iterator[InternalRow], + schema: StructType, + maxRecordsPerBatch: Long, + maxEstimatedBatchSize: Long, + limit: Long, + timeZoneId: String, + context: TaskContext) + extends Iterator[Array[Byte]] { + + protected val arrowSchema = toArrowSchema(schema, timeZoneId, true, false) + private val allocator = + ArrowUtils.rootAllocator.newChildAllocator( + s"to${this.getClass.getSimpleName}", + 0, + Long.MaxValue) + + private val root = VectorSchemaRoot.create(arrowSchema, allocator) + protected val unloader = new VectorUnloader(root) + protected val arrowWriter = ArrowWriter.create(root) + + Option(context).foreach { + _.addTaskCompletionListener[Unit] { _ => + root.close() + allocator.close() + } + } + + override def hasNext: Boolean = (rowIter.hasNext && (rowCount < limit || limit < 0)) || { + root.close() + allocator.close() + false + } + + var rowCountInLastBatch: Long = 0 + var rowCount: Long = 0 + + override def next(): Array[Byte] = { + val out = new ByteArrayOutputStream() + val writeChannel = new WriteChannel(Channels.newChannel(out)) + + rowCountInLastBatch = 0 + var estimatedBatchSize = 0L + Utils.tryWithSafeFinally { + + // Always write the first row. + while (rowIter.hasNext && ( + // For maxBatchSize and maxRecordsPerBatch, respect whatever smaller. + // If the size in bytes is positive (set properly), always write the first row. + rowCountInLastBatch == 0 && maxEstimatedBatchSize > 0 || + // If the size in bytes of rows are 0 or negative, unlimit it. + estimatedBatchSize <= 0 || + estimatedBatchSize < maxEstimatedBatchSize || + // If the size of rows are 0 or negative, unlimit it. + maxRecordsPerBatch <= 0 || + rowCountInLastBatch < maxRecordsPerBatch || + rowCount < limit || + limit < 0)) { + val row = rowIter.next() + arrowWriter.write(row) + estimatedBatchSize += (row match { + case ur: UnsafeRow => ur.getSizeInBytes + // Trying to estimate the size of the current row + case _: InternalRow => schema.defaultSize + }) + rowCountInLastBatch += 1 + rowCount += 1 + } + arrowWriter.finish() + val batch = unloader.getRecordBatch() + MessageSerializer.serialize(writeChannel, batch) + + // Always write the Ipc options at the end. 
+ ArrowStreamWriter.writeEndOfStream(writeChannel, ARROW_IPC_OPTION_DEFAULT) + + batch.close() + } { + arrowWriter.reset() + } + + out.toByteArray + } + } + + // the signature of function [[ArrowUtils.toArrowSchema]] is changed in SPARK-41971 (since Spark + // 3.5) + private lazy val toArrowSchemaMethod = DynMethods.builder("toArrowSchema") + .impl( // for Spark 3.4 or previous + "org.apache.spark.sql.util.ArrowUtils", + classOf[StructType], + classOf[String]) + .impl( // for Spark 3.5 or later + "org.apache.spark.sql.util.ArrowUtils", + classOf[StructType], + classOf[String], + classOf[Boolean], + classOf[Boolean]) + .build() + + /** + * this function uses reflective calls to the [[ArrowUtils.toArrowSchema]]. + */ + private def toArrowSchema( + schema: StructType, + timeZone: String, + errorOnDuplicatedFieldNames: JBoolean, + largeVarTypes: JBoolean): ArrowSchema = { + toArrowSchemaMethod.invoke[ArrowSchema]( + ArrowUtils, + schema, + timeZone, + errorOnDuplicatedFieldNames, + largeVarTypes) + } + + // IpcOption.DEFAULT was introduced in ARROW-11081(ARROW-4.0.0), add this for adapt Spark-3.1/3.2 + final private val ARROW_IPC_OPTION_DEFAULT = new IpcOption() +} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala index 1a542937338..c0f9d61c210 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala @@ -17,18 +17,87 @@ package org.apache.spark.sql.kyuubi +import scala.collection.mutable.ArrayBuffer + +import org.apache.spark.SparkContext +import org.apache.spark.internal.Logging +import org.apache.spark.network.util.{ByteUnit, JavaUtils} import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{DataFrame, Dataset, Row} +import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.execution.{CollectLimitExec, LocalTableScanExec, SparkPlan, SQLExecution} +import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec +import org.apache.spark.sql.execution.arrow.KyuubiArrowConverters +import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ +import org.apache.kyuubi.engine.spark.KyuubiSparkUtil import org.apache.kyuubi.engine.spark.schema.RowSet +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils.quoteIfNeeded +import org.apache.kyuubi.util.reflect.DynMethods +import org.apache.kyuubi.util.reflect.ReflectUtils._ + +object SparkDatasetHelper extends Logging { + + def executeCollect(df: DataFrame): Array[Array[Byte]] = withNewExecutionId(df) { + executeArrowBatchCollect(df.queryExecution.executedPlan) + } + + def executeArrowBatchCollect: SparkPlan => Array[Array[Byte]] = { + case adaptiveSparkPlan: AdaptiveSparkPlanExec => + executeArrowBatchCollect(finalPhysicalPlan(adaptiveSparkPlan)) + // TODO: avoid extra shuffle if `offset` > 0 + case collectLimit: CollectLimitExec if offset(collectLimit) > 0 => + logWarning("unsupported offset > 0, an extra shuffle will be introduced.") + toArrowBatchRdd(collectLimit).collect() + case collectLimit: CollectLimitExec if collectLimit.limit >= 0 => + doCollectLimit(collectLimit) + case collectLimit: CollectLimitExec if collectLimit.limit < 0 
=> + executeArrowBatchCollect(collectLimit.child) + // TODO: replace with pattern match once we drop Spark 3.1 support. + case command: SparkPlan if isCommandResultExec(command) => + doCommandResultExec(command) + case localTableScan: LocalTableScanExec => + doLocalTableScan(localTableScan) + case plan: SparkPlan => + toArrowBatchRdd(plan).collect() + } -object SparkDatasetHelper { def toArrowBatchRdd[T](ds: Dataset[T]): RDD[Array[Byte]] = { ds.toArrowBatchRdd } + /** + * Forked from [[Dataset.toArrowBatchRdd(plan: SparkPlan)]]. + * Convert to an RDD of serialized ArrowRecordBatches. + */ + def toArrowBatchRdd(plan: SparkPlan): RDD[Array[Byte]] = { + val schemaCaptured = plan.schema + // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we + // drop Spark 3.1 support. + val maxRecordsPerBatch = SparkSession.active.sessionState.conf.arrowMaxRecordsPerBatch + val timeZoneId = SparkSession.active.sessionState.conf.sessionLocalTimeZone + // note that, we can't pass the lazy variable `maxBatchSize` directly, this is because input + // arguments are serialized and sent to the executor side for execution. + val maxBatchSizePerBatch = maxBatchSize + plan.execute().mapPartitionsInternal { iter => + KyuubiArrowConverters.toBatchIterator( + iter, + schemaCaptured, + maxRecordsPerBatch, + maxBatchSizePerBatch, + -1, + timeZoneId) + } + } + + def toArrowBatchLocalIterator(df: DataFrame): Iterator[Array[Byte]] = { + withNewExecutionId(df) { + toArrowBatchRdd(df).toLocalIterator + } + } + def convertTopLevelComplexTypeToHiveString( df: DataFrame, timestampAsString: Boolean): DataFrame = { @@ -64,15 +133,149 @@ object SparkDatasetHelper { df.select(cols: _*) } + private lazy val maxBatchSize: Long = { + // respect spark connect config + KyuubiSparkUtil.globalSparkContext + .getConf + .getOption("spark.connect.grpc.arrow.maxBatchSize") + .orElse(Option("4m")) + .map(JavaUtils.byteStringAs(_, ByteUnit.MiB)) + .get + } + + private def doCollectLimit(collectLimit: CollectLimitExec): Array[Array[Byte]] = { + // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we + // drop Spark-3.1.x support. + val timeZoneId = SparkSession.active.sessionState.conf.sessionLocalTimeZone + val maxRecordsPerBatch = SparkSession.active.sessionState.conf.arrowMaxRecordsPerBatch + + val batches = KyuubiArrowConverters.takeAsArrowBatches( + collectLimit, + maxRecordsPerBatch, + maxBatchSize, + timeZoneId) + + // note that the number of rows in the returned arrow batches may be >= `limit`, perform + // the slicing operation of result + val result = ArrayBuffer[Array[Byte]]() + var i = 0 + var rest = collectLimit.limit + while (i < batches.length && rest > 0) { + val (batch, size) = batches(i) + if (size <= rest) { + result += batch + // returned ArrowRecordBatch has less than `limit` row count, safety to do conversion + rest -= size.toInt + } else { // size > rest + result += KyuubiArrowConverters.slice(collectLimit.schema, timeZoneId, batch, 0, rest) + rest = 0 + } + i += 1 + } + result.toArray + } + + private lazy val commandResultExecRowsMethod = DynMethods.builder("rows") + .impl("org.apache.spark.sql.execution.CommandResultExec") + .build() + + private def doCommandResultExec(command: SparkPlan): Array[Array[Byte]] = { + val spark = SparkSession.active + // TODO: replace with `command.rows` once we drop Spark 3.1 support. 
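+    // Editor's annotation: `rows` is resolved reflectively via DynMethods because
+    // CommandResultExec (SPARK-35378, Spark 3.2+) is not on the Spark 3.1 classpath,
+    // so a direct compile-time reference is not possible while 3.1 is still supported.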
+ val rows = commandResultExecRowsMethod.invoke[Seq[InternalRow]](command) + command.longMetric("numOutputRows").add(rows.size) + sendDriverMetrics(spark.sparkContext, command.metrics) + KyuubiArrowConverters.toBatchIterator( + rows.iterator, + command.schema, + spark.sessionState.conf.arrowMaxRecordsPerBatch, + maxBatchSize, + -1, + spark.sessionState.conf.sessionLocalTimeZone).toArray + } + + private def doLocalTableScan(localTableScan: LocalTableScanExec): Array[Array[Byte]] = { + val spark = SparkSession.active + localTableScan.longMetric("numOutputRows").add(localTableScan.rows.size) + sendDriverMetrics(spark.sparkContext, localTableScan.metrics) + KyuubiArrowConverters.toBatchIterator( + localTableScan.rows.iterator, + localTableScan.schema, + spark.sessionState.conf.arrowMaxRecordsPerBatch, + maxBatchSize, + -1, + spark.sessionState.conf.sessionLocalTimeZone).toArray + } + /** - * Fork from Apache Spark-3.3.1 org.apache.spark.sql.catalyst.util.quoteIfNeeded to adapt to - * Spark-3.1.x + * This method provides a reflection-based implementation of + * [[AdaptiveSparkPlanExec.finalPhysicalPlan]] that enables us to adapt to the Spark runtime + * without patching SPARK-41914. + * + * TODO: Once we drop support for Spark 3.1.x, we can directly call + * [[AdaptiveSparkPlanExec.finalPhysicalPlan]]. */ - def quoteIfNeeded(part: String): String = { - if (part.matches("[a-zA-Z0-9_]+") && !part.matches("\\d+")) { - part - } else { - s"`${part.replace("`", "``")}`" + def finalPhysicalPlan(adaptiveSparkPlanExec: AdaptiveSparkPlanExec): SparkPlan = { + withFinalPlanUpdate(adaptiveSparkPlanExec, identity) + } + + /** + * A reflection-based implementation of [[AdaptiveSparkPlanExec.withFinalPlanUpdate]]. + */ + private def withFinalPlanUpdate[T]( + adaptiveSparkPlanExec: AdaptiveSparkPlanExec, + fun: SparkPlan => T): T = { + val plan = invokeAs[SparkPlan](adaptiveSparkPlanExec, "getFinalPhysicalPlan") + val result = fun(plan) + invokeAs[Unit](adaptiveSparkPlanExec, "finalPlanUpdate") + result + } + + /** + * offset support was add since Spark-3.4(set SPARK-28330), to ensure backward compatibility with + * earlier versions of Spark, this function uses reflective calls to the "offset". + */ + private def offset(collectLimitExec: CollectLimitExec): Int = { + Option( + DynMethods.builder("offset") + .impl(collectLimitExec.getClass) + .orNoop() + .build() + .invoke[Int](collectLimitExec)) + .getOrElse(0) + } + + private def isCommandResultExec(sparkPlan: SparkPlan): Boolean = { + // scalastyle:off line.size.limit + // the CommandResultExec was introduced in SPARK-35378 (Spark 3.2), after SPARK-35378 the + // physical plan of runnable command is CommandResultExec. + // for instance: + // ``` + // scala> spark.sql("show tables").queryExecution.executedPlan + // res0: org.apache.spark.sql.execution.SparkPlan = + // CommandResult , [namespace#0, tableName#1, isTemporary#2] + // +- ShowTables [namespace#0, tableName#1, isTemporary#2], V2SessionCatalog(spark_catalog), [default] + // + // scala > spark.sql("show tables").queryExecution.executedPlan.getClass + // res1: Class[_ <: org.apache.spark.sql.execution.SparkPlan] = class org.apache.spark.sql.execution.CommandResultExec + // ``` + // scalastyle:on line.size.limit + sparkPlan.getClass.getName == "org.apache.spark.sql.execution.CommandResultExec" + } + + /** + * refer to org.apache.spark.sql.Dataset#withAction(), assign a new execution id for arrow-based + * operation, so that we can track the arrow-based queries on the UI tab. 
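+   * For instance, `executeCollect` above wraps the arrow-batch collection in exactly this way:
+   * {{{
+   *   withNewExecutionId(df) {
+   *     executeArrowBatchCollect(df.queryExecution.executedPlan)
+   *   }
+   * }}}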
+ */ + private def withNewExecutionId[T](df: DataFrame)(body: => T): T = { + SQLExecution.withNewExecutionId(df.queryExecution, Some("collectAsArrow")) { + df.queryExecution.executedPlan.resetMetrics() + body } } + + private def sendDriverMetrics(sc: SparkContext, metrics: Map[String, SQLMetric]): Unit = { + val executionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY) + SQLMetrics.postDriverMetricUpdates(sc, executionId, metrics.values.toSeq) + } } diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EnginePage.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EnginePage.scala index a2a2931f411..7188ac62f62 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EnginePage.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EnginePage.scala @@ -29,7 +29,7 @@ import org.apache.commons.text.StringEscapeUtils import org.apache.spark.ui.TableSourceUtil._ import org.apache.spark.ui.UIUtils._ -import org.apache.kyuubi.{KYUUBI_VERSION, Utils} +import org.apache.kyuubi._ import org.apache.kyuubi.engine.spark.events.{SessionEvent, SparkOperationEvent} case class EnginePage(parent: EngineTab) extends WebUIPage("") { @@ -58,6 +58,15 @@ case class EnginePage(parent: EngineTab) extends WebUIPage("") { Kyuubi Version: {KYUUBI_VERSION} +
    • + Compilation Revision: + {REVISION.substring(0, 7)} ({REVISION_TIME}), branch {BRANCH} +
    • +
    • + Compilation with: + Spark {SPARK_COMPILE_VERSION}, Scala {SCALA_COMPILE_VERSION}, + Hadoop {HADOOP_COMPILE_VERSION}, Hive {HIVE_COMPILE_VERSION} +
    • Started at: {new Date(parent.startTime)} @@ -292,7 +301,7 @@ case class EnginePage(parent: EngineTab) extends WebUIPage("") { {session.name} {formatDate(session.startTime)} {if (session.endTime > 0) formatDate(session.endTime)} - {formatDurationVerbose(session.duration)} + {formatDuration(session.duration)} {session.totalOperations} } @@ -386,7 +395,7 @@ private class StatementStatsPagedTable( {if (event.completeTime > 0) formatDate(event.completeTime)} - {formatDurationVerbose(event.duration)} + {formatDuration(event.duration)} diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineSessionPage.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineSessionPage.scala index 1f34ae64f12..cdfc6d31355 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineSessionPage.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineSessionPage.scala @@ -42,7 +42,7 @@ case class EngineSessionPage(parent: EngineTab) require(parameterId != null && parameterId.nonEmpty, "Missing id parameter") val content = store.synchronized { // make sure all parts in this page are consistent - val sessionStat = store.getSession(parameterId).getOrElse(null) + val sessionStat = store.getSession(parameterId).orNull require(sessionStat != null, "Invalid sessionID[" + parameterId + "]") val redactionPattern = parent.sparkUI match { @@ -51,7 +51,7 @@ case class EngineSessionPage(parent: EngineTab) } val sessionPropertiesTable = - if (sessionStat.conf != null && !sessionStat.conf.isEmpty) { + if (sessionStat.conf != null && sessionStat.conf.nonEmpty) { val table = UIUtils.listingTable( propertyHeader, propertyRow, @@ -78,8 +78,18 @@ case class EngineSessionPage(parent: EngineTab)

      User {sessionStat.username}, IP {sessionStat.ip}, - Server {sessionStat.serverIp}, + Server {sessionStat.serverIp} +

      ++ +

      Session created at {formatDate(sessionStat.startTime)}, + { + if (sessionStat.endTime > 0) { + s""" + | ended at ${formatDate(sessionStat.endTime)}, + | after ${formatDuration(sessionStat.duration)}. + |""".stripMargin + } + } Total run {sessionStat.totalOperations} SQL

      ++ sessionPropertiesTable ++ diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineTab.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineTab.scala index b7cebbd97eb..52edcf2200a 100644 --- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineTab.scala +++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/ui/EngineTab.scala @@ -26,7 +26,7 @@ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.engine.spark.SparkSQLEngine import org.apache.kyuubi.engine.spark.events.EngineEventsStore import org.apache.kyuubi.service.ServiceState -import org.apache.kyuubi.util.ClassUtils +import org.apache.kyuubi.util.reflect.{DynClasses, DynMethods} /** * Note that [[SparkUITab]] is private for Spark @@ -62,31 +62,35 @@ case class EngineTab( sparkUI.foreach { ui => try { - // Spark shade the jetty package so here we use reflection - val sparkServletContextHandlerClz = loadSparkServletContextHandler - val attachHandlerMethod = Class.forName("org.apache.spark.ui.SparkUI") - .getMethod("attachHandler", sparkServletContextHandlerClz) - val createRedirectHandlerMethod = Class.forName("org.apache.spark.ui.JettyUtils") - .getMethod( - "createRedirectHandler", + // [KYUUBI #3627]: the official spark release uses the shaded and relocated jetty classes, + // but if we use sbt to build for testing, e.g. docker image, it still uses the vanilla + // jetty classes. + val sparkServletContextHandlerClz = DynClasses.builder() + .impl("org.sparkproject.jetty.servlet.ServletContextHandler") + .impl("org.eclipse.jetty.servlet.ServletContextHandler") + .buildChecked() + val attachHandlerMethod = DynMethods.builder("attachHandler") + .impl("org.apache.spark.ui.SparkUI", sparkServletContextHandlerClz) + .buildChecked(ui) + val createRedirectHandlerMethod = DynMethods.builder("createRedirectHandler") + .impl( + "org.apache.spark.ui.JettyUtils", classOf[String], classOf[String], - classOf[(HttpServletRequest) => Unit], + classOf[HttpServletRequest => Unit], classOf[String], classOf[Set[String]]) + .buildStaticChecked() attachHandlerMethod .invoke( - ui, createRedirectHandlerMethod - .invoke(null, "/kyuubi/stop", "/kyuubi", handleKillRequest _, "", Set("GET", "POST"))) + .invoke("/kyuubi/stop", "/kyuubi", handleKillRequest _, "", Set("GET", "POST"))) attachHandlerMethod .invoke( - ui, createRedirectHandlerMethod .invoke( - null, "/kyuubi/gracefulstop", "/kyuubi", handleGracefulKillRequest _, @@ -105,18 +109,6 @@ case class EngineTab( cause) } - private def loadSparkServletContextHandler: Class[_] = { - // [KYUUBI #3627]: the official spark release uses the shaded and relocated jetty classes, - // but if use sbt to build for testing, e.g. docker image, it still uses vanilla jetty classes. 
- val shaded = "org.sparkproject.jetty.servlet.ServletContextHandler" - val vanilla = "org.eclipse.jetty.servlet.ServletContextHandler" - if (ClassUtils.classIsLoadable(shaded)) { - Class.forName(shaded) - } else { - Class.forName(vanilla) - } - } - def handleKillRequest(request: HttpServletRequest): Unit = { if (killEnabled && engine.isDefined && engine.get.getServiceState != ServiceState.STOPPED) { engine.get.stop() diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/EtcdShareLevelSparkEngineSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/EtcdShareLevelSparkEngineSuite.scala index 46dc3b54c13..727b232e3f8 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/EtcdShareLevelSparkEngineSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/EtcdShareLevelSparkEngineSuite.scala @@ -17,9 +17,7 @@ package org.apache.kyuubi.engine.spark -import org.apache.kyuubi.config.KyuubiConf.ENGINE_CHECK_INTERVAL -import org.apache.kyuubi.config.KyuubiConf.ENGINE_SHARE_LEVEL -import org.apache.kyuubi.config.KyuubiConf.ENGINE_SPARK_MAX_LIFETIME +import org.apache.kyuubi.config.KyuubiConf.{ENGINE_CHECK_INTERVAL, ENGINE_SHARE_LEVEL, ENGINE_SPARK_MAX_INITIAL_WAIT, ENGINE_SPARK_MAX_LIFETIME} import org.apache.kyuubi.engine.ShareLevel import org.apache.kyuubi.engine.ShareLevel.ShareLevel @@ -30,6 +28,7 @@ trait EtcdShareLevelSparkEngineSuite etcdConf ++ Map( ENGINE_SHARE_LEVEL.key -> shareLevel.toString, ENGINE_SPARK_MAX_LIFETIME.key -> "PT20s", + ENGINE_SPARK_MAX_INITIAL_WAIT.key -> "0", ENGINE_CHECK_INTERVAL.key -> "PT5s") } } diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala index af8c90cf29e..a07f7d78382 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala @@ -19,6 +19,9 @@ package org.apache.kyuubi.engine.spark import java.util.concurrent.Executors +import scala.concurrent.duration.SECONDS + +import org.apache.spark.KyuubiSparkContextHelper import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart} import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.scalatest.time.SpanSugar.convertIntToGrainOfTime @@ -76,33 +79,36 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper { eventually(Timeout(3.seconds)) { assert(job0Started) } - Seq(1, 0).foreach { priority => - threads.execute(() => { - priority match { - case 0 => - withJdbcStatement() { statement => - statement.execute("SET kyuubi.operation.scheduler.pool=p0") - statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" + - "FROM range(1, 3, 1, 2)") - } - - case 1 => - withJdbcStatement() { statement => - statement.execute("SET kyuubi.operation.scheduler.pool=p1") - statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" + - " FROM range(1, 3, 1, 2)") - } - } - }) + threads.execute(() => { + // job name job1 + withJdbcStatement() { statement => + statement.execute("SET kyuubi.operation.scheduler.pool=p1") + statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" + + " FROM range(1, 3, 1, 2)") + } + }) + // make sure 
job1 started before job2 + eventually(Timeout(2.seconds)) { + assert(job1StartTime > 0) } + + threads.execute(() => { + // job name job2 + withJdbcStatement() { statement => + statement.execute("SET kyuubi.operation.scheduler.pool=p0") + statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" + + "FROM range(1, 3, 1, 2)") + } + }) threads.shutdown() - eventually(Timeout(20.seconds)) { - // We can not ensure that job1 is started before job2 so here using abs. - assert(Math.abs(job1StartTime - job2StartTime) < 1000) - // Job1 minShare is 2(total resource) so that job2 should be allocated tasks after - // job1 finished. - assert(job2FinishTime - job1FinishTime >= 1000) - } + threads.awaitTermination(20, SECONDS) + // make sure the SparkListener has received the finished events for job1 and job2. + KyuubiSparkContextHelper.waitListenerBus(spark) + // job1 should be started before job2 + assert(job1StartTime < job2StartTime) + // job2 minShare is 2(total resource) so that job1 should be allocated tasks after + // job2 finished. + assert(job2FinishTime < job1FinishTime) } finally { spark.sparkContext.removeSparkListener(listener) } diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkEngineRegisterSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkEngineRegisterSuite.scala new file mode 100644 index 00000000000..8c636af7612 --- /dev/null +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkEngineRegisterSuite.scala @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.spark + +import java.util.UUID + +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_ID, KYUUBI_ENGINE_URL} + +trait SparkEngineRegisterSuite extends WithDiscoverySparkSQLEngine { + + override def withKyuubiConf: Map[String, String] = + super.withKyuubiConf ++ Map("spark.ui.enabled" -> "true") + + override val namespace: String = s"/kyuubi/deregister_test/${UUID.randomUUID.toString}" + + test("Spark Engine Register Zookeeper with spark ui info") { + withDiscoveryClient(client => { + val info = client.getChildren(namespace).head.split(";") + assert(info.exists(_.startsWith(KYUUBI_ENGINE_ID))) + assert(info.exists(_.startsWith(KYUUBI_ENGINE_URL))) + }) + } +} + +class ZookeeperSparkEngineRegisterSuite extends SparkEngineRegisterSuite + with WithEmbeddedZookeeper { + + override def withKyuubiConf: Map[String, String] = + super.withKyuubiConf ++ zookeeperConf +} + +class EtcdSparkEngineRegisterSuite extends SparkEngineRegisterSuite + with WithEtcdCluster { + override def withKyuubiConf: Map[String, String] = super.withKyuubiConf ++ etcdConf +} diff --git a/extensions/spark/kyuubi-spark-connector-kudu/src/test/scala/org/apache/kyuubi/spark/connector/kudu/KuduClientSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendServiceSuite.scala similarity index 70% rename from extensions/spark/kyuubi-spark-connector-kudu/src/test/scala/org/apache/kyuubi/spark/connector/kudu/KuduClientSuite.scala rename to externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendServiceSuite.scala index eebb4719cc2..5f81e51f825 100644 --- a/extensions/spark/kyuubi-spark-connector-kudu/src/test/scala/org/apache/kyuubi/spark/connector/kudu/KuduClientSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SparkTBinaryFrontendServiceSuite.scala @@ -15,18 +15,15 @@ * limitations under the License. 
*/ -package org.apache.kyuubi.spark.connector.kudu +package org.apache.kyuubi.engine.spark -import org.apache.kudu.client.KuduClient +import org.apache.hadoop.conf.Configuration import org.apache.kyuubi.KyuubiFunSuite -class KuduClientSuite extends KyuubiFunSuite with KuduMixin { - - test("kudu client") { - val builder = new KuduClient.KuduClientBuilder(kuduMasterUrl) - val kuduClient = builder.build() - - assert(kuduClient.findLeaderMasterServer().getPort === kuduMasterPort) +class SparkTBinaryFrontendServiceSuite extends KyuubiFunSuite { + test("new hive conf") { + val hiveConf = SparkTBinaryFrontendService.hiveConf(new Configuration()) + assert(hiveConf.getClass().getName == SparkTBinaryFrontendService.HIVE_CONF_CLASSNAME) } } diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/WithSparkSQLEngine.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/WithSparkSQLEngine.scala index 629a8374b12..3b98c2efb16 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/WithSparkSQLEngine.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/WithSparkSQLEngine.scala @@ -21,7 +21,7 @@ import org.apache.spark.sql.SparkSession import org.apache.kyuubi.{KyuubiFunSuite, Utils} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.spark.KyuubiSparkUtil.sparkMajorMinorVersion +import org.apache.kyuubi.engine.spark.KyuubiSparkUtil.SPARK_ENGINE_RUNTIME_VERSION trait WithSparkSQLEngine extends KyuubiFunSuite { protected var spark: SparkSession = _ @@ -34,14 +34,8 @@ trait WithSparkSQLEngine extends KyuubiFunSuite { // Affected by such configuration' default value // engine.initialize.sql='SHOW DATABASES' - protected var initJobId: Int = { - sparkMajorMinorVersion match { - case (3, minor) if minor >= 2 => 1 // SPARK-35378 - case (3, _) => 0 - case _ => - throw new IllegalArgumentException(s"Not Support spark version $sparkMajorMinorVersion") - } - } + // SPARK-35378 + protected lazy val initJobId: Int = if (SPARK_ENGINE_RUNTIME_VERSION >= "3.2") 1 else 0 override def beforeAll(): Unit = { startSparkEngine() diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/ZookeeperShareLevelSparkEngineSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/ZookeeperShareLevelSparkEngineSuite.scala index 4ef96e61a58..f24abb36c0e 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/ZookeeperShareLevelSparkEngineSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/ZookeeperShareLevelSparkEngineSuite.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.engine.spark import org.apache.kyuubi.config.KyuubiConf.ENGINE_CHECK_INTERVAL import org.apache.kyuubi.config.KyuubiConf.ENGINE_SHARE_LEVEL +import org.apache.kyuubi.config.KyuubiConf.ENGINE_SPARK_MAX_INITIAL_WAIT import org.apache.kyuubi.config.KyuubiConf.ENGINE_SPARK_MAX_LIFETIME import org.apache.kyuubi.engine.ShareLevel import org.apache.kyuubi.engine.ShareLevel.ShareLevel @@ -30,6 +31,7 @@ trait ZookeeperShareLevelSparkEngineSuite zookeeperConf ++ Map( ENGINE_SHARE_LEVEL.key -> shareLevel.toString, ENGINE_SPARK_MAX_LIFETIME.key -> "PT20s", + ENGINE_SPARK_MAX_INITIAL_WAIT.key -> "0", ENGINE_CHECK_INTERVAL.key -> "PT5s") } } diff --git 
a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala index ae6237bb59c..d3d4a56d783 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala @@ -17,19 +17,36 @@ package org.apache.kyuubi.engine.spark.operation +import java.lang.{Boolean => JBoolean} import java.sql.Statement +import java.util.{Locale, Set => JSet} -import org.apache.spark.KyuubiSparkContextHelper -import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project} -import org.apache.spark.sql.execution.QueryExecution +import org.apache.spark.{KyuubiSparkContextHelper, TaskContext} +import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} +import org.apache.spark.sql.{QueryTest, Row, SparkSession} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.plans.logical.Project +import org.apache.spark.sql.execution.{CollectLimitExec, LocalTableScanExec, QueryExecution, SparkPlan} +import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec +import org.apache.spark.sql.execution.exchange.Exchange +import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} +import org.apache.spark.sql.execution.metric.SparkMetricsTestUtils +import org.apache.spark.sql.functions.col +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.kyuubi.SparkDatasetHelper +import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.QueryExecutionListener +import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.engine.spark.{SparkSQLEngine, WithSparkSQLEngine} import org.apache.kyuubi.engine.spark.session.SparkSessionImpl import org.apache.kyuubi.operation.SparkDataTypeTests +import org.apache.kyuubi.util.reflect.{DynFields, DynMethods} +import org.apache.kyuubi.util.reflect.ReflectUtils._ -class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTypeTests { +class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTypeTests + with SparkMetricsTestUtils { override protected def jdbcUrl: String = getJdbcUrl @@ -46,6 +63,16 @@ class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTyp withJdbcStatement() { statement => checkResultSetFormat(statement, "arrow") } + spark.catalog.listTables() + .collect() + .foreach { table => + if (table.isTemporary) { + spark.catalog.dropTempView(table.name) + } else { + spark.sql(s"DROP TABLE IF EXISTS ${table.name}") + } + () + } } test("detect resultSet format") { @@ -92,52 +119,277 @@ class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTyp } test("assign a new execution id for arrow-based result") { - var plan: LogicalPlan = null - - val listener = new QueryExecutionListener { - override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { - plan = qe.analyzed + val listener = new SQLMetricsListener + withJdbcStatement() { statement => + withSparkListener(listener) { + val result = statement.executeQuery("select 1 as c1") + assert(result.next()) + assert(result.getInt("c1") == 1) } - override def onFailure(funcName: String, 
qe: QueryExecution, exception: Exception): Unit = {} } + + assert(listener.queryExecution.analyzed.isInstanceOf[Project]) + } + + test("arrow-based query metrics") { + val listener = new SQLMetricsListener withJdbcStatement() { statement => - // since all the new sessions have their owner listener bus, we should register the listener - // in the current session. - registerListener(listener) + withSparkListener(listener) { + val result = statement.executeQuery("select 1 as c1") + assert(result.next()) + assert(result.getInt("c1") == 1) + } + } + + val metrics = listener.queryExecution.executedPlan.collectLeaves().head.metrics + assert(metrics.contains("numOutputRows")) + assert(metrics("numOutputRows").value === 1) + } + + test("SparkDatasetHelper.executeArrowBatchCollect should return expect row count") { + val returnSize = Seq( + 0, // spark optimizer guaranty the `limit != 0`, it's just for the sanity check + 7, // less than one partition + 10, // equal to one partition + 13, // between one and two partitions, run two jobs + 20, // equal to two partitions + 29, // between two and three partitions + 1000, // all partitions + 1001) // more than total row count + + def runAndCheck(sparkPlan: SparkPlan, expectSize: Int): Unit = { + val arrowBinary = SparkDatasetHelper.executeArrowBatchCollect(sparkPlan) + val rows = fromBatchIterator( + arrowBinary.iterator, + sparkPlan.schema, + "", + true, + KyuubiSparkContextHelper.dummyTaskContext()) + assert(rows.size == expectSize) + } + + val excludedRules = Seq( + "org.apache.spark.sql.catalyst.optimizer.EliminateLimits", + "org.apache.spark.sql.catalyst.optimizer.OptimizeLimitZero", + "org.apache.spark.sql.execution.adaptive.AQEPropagateEmptyRelation").mkString(",") + withSQLConf( + SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> excludedRules, + SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> excludedRules) { + // aqe + // outermost AdaptiveSparkPlanExec + spark.range(1000) + .repartitionByRange(100, col("id")) + .createOrReplaceTempView("t_1") + spark.sql("select * from t_1") + .foreachPartition { p: Iterator[Row] => + assert(p.length == 10) + () + } + returnSize.foreach { size => + val df = spark.sql(s"select * from t_1 limit $size") + val headPlan = df.queryExecution.executedPlan.collectLeaves().head + if (SPARK_ENGINE_RUNTIME_VERSION >= "3.2") { + assert(headPlan.isInstanceOf[AdaptiveSparkPlanExec]) + val finalPhysicalPlan = + SparkDatasetHelper.finalPhysicalPlan(headPlan.asInstanceOf[AdaptiveSparkPlanExec]) + assert(finalPhysicalPlan.isInstanceOf[CollectLimitExec]) + } + if (size > 1000) { + runAndCheck(df.queryExecution.executedPlan, 1000) + } else { + runAndCheck(df.queryExecution.executedPlan, size) + } + } - val result = statement.executeQuery("select 1 as c1") - assert(result.next()) - assert(result.getInt("c1") == 1) + // outermost CollectLimitExec + spark.range(0, 1000, 1, numPartitions = 100) + .createOrReplaceTempView("t_2") + spark.sql("select * from t_2") + .foreachPartition { p: Iterator[Row] => + assert(p.length == 10) + () + } + returnSize.foreach { size => + val df = spark.sql(s"select * from t_2 limit $size") + val plan = df.queryExecution.executedPlan + assert(plan.isInstanceOf[CollectLimitExec]) + if (size > 1000) { + runAndCheck(df.queryExecution.executedPlan, 1000) + } else { + runAndCheck(df.queryExecution.executedPlan, size) + } + } } - KyuubiSparkContextHelper.waitListenerBus(spark) - unregisterListener(listener) - assert(plan.isInstanceOf[Project]) } - test("arrow-based query metrics") { - var queryExecution: QueryExecution = 
null + test("aqe should work properly") { + + val s = spark + import s.implicits._ + + spark.sparkContext.parallelize( + (1 to 100).map(i => TestData(i, i.toString))).toDF() + .createOrReplaceTempView("testData") + spark.sparkContext.parallelize( + TestData2(1, 1) :: + TestData2(1, 2) :: + TestData2(2, 1) :: + TestData2(2, 2) :: + TestData2(3, 1) :: + TestData2(3, 2) :: Nil, + 2).toDF() + .createOrReplaceTempView("testData2") + + withSQLConf( + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true", + SQLConf.SHUFFLE_PARTITIONS.key -> "5", + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") { + val (plan, adaptivePlan) = runAdaptiveAndVerifyResult( + """ + |SELECT * FROM( + | SELECT * FROM testData join testData2 ON key = a where value = '1' + |) LIMIT 1 + |""".stripMargin) + val smj = plan.collect { case smj: SortMergeJoinExec => smj } + val bhj = adaptivePlan.collect { case bhj: BroadcastHashJoinExec => bhj } + assert(smj.size == 1) + assert(bhj.size == 1) + } + } + + test("result offset support") { + assume(SPARK_ENGINE_RUNTIME_VERSION >= "3.4") + var numStages = 0 + val listener = new SparkListener { + override def onJobStart(jobStart: SparkListenerJobStart): Unit = { + numStages = jobStart.stageInfos.length + } + } + withJdbcStatement() { statement => + withSparkListener(listener) { + withPartitionedTable("t_3") { + statement.executeQuery("select * from t_3 limit 10 offset 10") + } + } + } + // the extra shuffle be introduced if the `offset` > 0 + assert(numStages == 2) + } - val listener = new QueryExecutionListener { - override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { - queryExecution = qe + test("arrow serialization should not introduce extra shuffle for outermost limit") { + var numStages = 0 + val listener = new SparkListener { + override def onJobStart(jobStart: SparkListenerJobStart): Unit = { + numStages = jobStart.stageInfos.length } - override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {} } withJdbcStatement() { statement => - registerListener(listener) - val result = statement.executeQuery("select 1 as c1") - assert(result.next()) - assert(result.getInt("c1") == 1) + withSparkListener(listener) { + withPartitionedTable("t_3") { + statement.executeQuery("select * from t_3 limit 1000") + } + } + } + // Should be only one stage since there is no shuffle. + assert(numStages == 1) + } + + test("CommandResultExec should not trigger job") { + val listener = new JobCountListener + val l2 = new SQLMetricsListener + val nodeName = spark.sql("SHOW TABLES").queryExecution.executedPlan.getClass.getName + if (SPARK_ENGINE_RUNTIME_VERSION < "3.2") { + assert(nodeName == "org.apache.spark.sql.execution.command.ExecutedCommandExec") + } else { + assert(nodeName == "org.apache.spark.sql.execution.CommandResultExec") } + withJdbcStatement("table_1") { statement => + statement.executeQuery("CREATE TABLE table_1 (id bigint) USING parquet") + withSparkListener(listener) { + withSparkListener(l2) { + val resultSet = statement.executeQuery("SHOW TABLES") + assert(resultSet.next()) + assert(resultSet.getString("tableName") == "table_1") + } + } + } + + if (SPARK_ENGINE_RUNTIME_VERSION < "3.2") { + // Note that before Spark 3.2, a LocalTableScan SparkPlan will be submitted, and the issue of + // preventing LocalTableScan from triggering a job submission was addressed in [KYUUBI #4710]. 
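  // -------------------------------------------------------------------------
  // Illustrative sketch only -- not part of this patch. The tests above attach
  // a SparkListener to count the jobs and stages an action actually submits
  // (for example, to show that arrow serialization adds no extra shuffle and
  // that CommandResultExec / LocalTableScanExec trigger no job at all). The
  // same pattern against a plain local SparkSession, with hypothetical names,
  // could look like the snippet below. The patch itself drains pending events
  // via KyuubiSparkContextHelper.waitListenerBus (a test helper placed under
  // org.apache.spark because the listener bus is package-private); the sketch
  // settles for a short sleep to stay self-contained.
  object JobAndStageCountSketch {
    import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
    import org.apache.spark.sql.SparkSession

    def main(args: Array[String]): Unit = {
      val spark = SparkSession.builder().master("local[2]").appName("job-count-sketch").getOrCreate()
      var numJobs = 0
      var lastJobStages = 0
      val listener = new SparkListener {
        override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
          numJobs += 1
          lastJobStages = jobStart.stageInfos.length
        }
      }
      spark.sparkContext.addSparkListener(listener)
      try {
        // An outermost limit over a partitioned range: with no extra shuffle
        // this should stay a single-stage job.
        spark.range(0, 1000, 1, numPartitions = 100).limit(10).collect()
        Thread.sleep(1000) // crude stand-in for draining the async listener bus
        println(s"jobs=$numJobs, stages in last job=$lastJobStages")
      } finally {
        spark.sparkContext.removeSparkListener(listener)
        spark.stop()
      }
    }
  }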
+ assert(l2.queryExecution.executedPlan.getClass.getName == + "org.apache.spark.sql.execution.LocalTableScanExec") + } else { + assert(l2.queryExecution.executedPlan.getClass.getName == + "org.apache.spark.sql.execution.CommandResultExec") + } + assert(listener.numJobs == 0) + } + + test("LocalTableScanExec should not trigger job") { + val listener = new JobCountListener + withJdbcStatement("view_1") { statement => + withSparkListener(listener) { + withAllSessions { s => + import s.implicits._ + Seq((1, "a")).toDF("c1", "c2").createOrReplaceTempView("view_1") + val plan = s.sql("select * from view_1").queryExecution.executedPlan + assert(plan.isInstanceOf[LocalTableScanExec]) + } + val resultSet = statement.executeQuery("select * from view_1") + assert(resultSet.next()) + assert(!resultSet.next()) + } + } + assert(listener.numJobs == 0) + } - KyuubiSparkContextHelper.waitListenerBus(spark) - unregisterListener(listener) + test("LocalTableScanExec metrics") { + val listener = new SQLMetricsListener + withJdbcStatement("view_1") { statement => + withSparkListener(listener) { + withAllSessions { s => + import s.implicits._ + Seq((1, "a")).toDF("c1", "c2").createOrReplaceTempView("view_1") + } + val result = statement.executeQuery("select * from view_1") + assert(result.next()) + assert(!result.next()) + } + } - val metrics = queryExecution.executedPlan.collectLeaves().head.metrics + val metrics = listener.queryExecution.executedPlan.collectLeaves().head.metrics assert(metrics.contains("numOutputRows")) assert(metrics("numOutputRows").value === 1) } + test("post LocalTableScanExec driver-side metrics") { + val expectedMetrics = Map( + 0L -> (("LocalTableScan", Map("number of output rows" -> "2")))) + withTables("view_1") { + val s = spark + import s.implicits._ + Seq((1, "a"), (2, "b")).toDF("c1", "c2").createOrReplaceTempView("view_1") + val df = spark.sql("SELECT * FROM view_1") + val metrics = getSparkPlanMetrics(df) + assert(metrics == expectedMetrics) + } + } + + test("post CommandResultExec driver-side metrics") { + spark.sql("show tables").show(truncate = false) + assume(SPARK_ENGINE_RUNTIME_VERSION >= "3.2") + val expectedMetrics = Map( + 0L -> (("CommandResult", Map("number of output rows" -> "2")))) + withTables("table_1", "table_2") { + spark.sql("CREATE TABLE table_1 (id bigint) USING parquet") + spark.sql("CREATE TABLE table_2 (id bigint) USING parquet") + val df = spark.sql("SHOW TABLES") + val metrics = getSparkPlanMetrics(df) + assert(metrics == expectedMetrics) + } + } + private def checkResultSetFormat(statement: Statement, expectFormat: String): Unit = { val query = s""" @@ -160,21 +412,184 @@ class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTyp assert(resultSet.getString("col") === expect) } - private def registerListener(listener: QueryExecutionListener): Unit = { - // since all the new sessions have their owner listener bus, we should register the listener - // in the current session. - SparkSQLEngine.currentEngine.get - .backendService - .sessionManager - .allSessions() - .foreach(_.asInstanceOf[SparkSessionImpl].spark.listenerManager.register(listener)) + // since all the new sessions have their owner listener bus, we should register the listener + // in the current session. 
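  // Illustrative sketch only -- not part of the patch. The withSparkListener
  // helper defined just below uses a register / try / finally-unregister
  // "loan" pattern so a listener never leaks across tests. The same pattern on
  // a single, plain SparkSession with the public QueryExecutionListener API
  // could look like this (object and method names are hypothetical):
  object QueryExecutionListenerSketch {
    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.execution.QueryExecution
    import org.apache.spark.sql.util.QueryExecutionListener

    def withQueryExecutionListener[T](spark: SparkSession, listener: QueryExecutionListener)(
        body: => T): T = {
      spark.listenerManager.register(listener)
      try body
      finally spark.listenerManager.unregister(listener) // always detach, even if body throws
    }

    def lastQueryExecution(spark: SparkSession)(body: => Unit): QueryExecution = {
      var captured: QueryExecution = null
      val listener = new QueryExecutionListener {
        override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit =
          captured = qe
        override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = ()
      }
      withQueryExecutionListener(spark, listener)(body)
      // NOTE: listener delivery is asynchronous; the real tests drain the
      // listener bus (KyuubiSparkContextHelper.waitListenerBus) before reading.
      captured
    }
  }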
+ private def withSparkListener[T](listener: QueryExecutionListener)(body: => T): T = { + withAllSessions(s => s.listenerManager.register(listener)) + try { + val result = body + KyuubiSparkContextHelper.waitListenerBus(spark) + result + } finally { + withAllSessions(s => s.listenerManager.unregister(listener)) + } + } + + // since all the new sessions have their owner listener bus, we should register the listener + // in the current session. + private def withSparkListener[T](listener: SparkListener)(body: => T): T = { + withAllSessions(s => s.sparkContext.addSparkListener(listener)) + try { + val result = body + KyuubiSparkContextHelper.waitListenerBus(spark) + result + } finally { + withAllSessions(s => s.sparkContext.removeSparkListener(listener)) + } + } + + private def withPartitionedTable[T](viewName: String)(body: => T): T = { + withAllSessions { spark => + spark.range(0, 1000, 1, numPartitions = 100) + .createOrReplaceTempView(viewName) + } + try { + body + } finally { + withAllSessions { spark => + spark.sql(s"DROP VIEW IF EXISTS $viewName") + } + } } - private def unregisterListener(listener: QueryExecutionListener): Unit = { + private def withAllSessions(op: SparkSession => Unit): Unit = { SparkSQLEngine.currentEngine.get .backendService .sessionManager .allSessions() - .foreach(_.asInstanceOf[SparkSessionImpl].spark.listenerManager.unregister(listener)) + .map(_.asInstanceOf[SparkSessionImpl].spark) + .foreach(op(_)) + } + + private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = { + val dfAdaptive = spark.sql(query) + val planBefore = dfAdaptive.queryExecution.executedPlan + val result = dfAdaptive.collect() + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + val df = spark.sql(query) + QueryTest.checkAnswer(df, df.collect().toSeq) + } + val planAfter = dfAdaptive.queryExecution.executedPlan + val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan + val exchanges = adaptivePlan.collect { + case e: Exchange => e + } + assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.") + (dfAdaptive.queryExecution.sparkPlan, adaptivePlan) + } + + /** + * Sets all SQL configurations specified in `pairs`, calls `f`, and then restores all SQL + * configurations. + */ + protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = { + val conf = SQLConf.get + val (keys, values) = pairs.unzip + val currentValues = keys.map { key => + if (conf.contains(key)) { + Some(conf.getConfString(key)) + } else { + None + } + } + (keys, values).zipped.foreach { (k, v) => + if (isStaticConfigKey(k)) { + throw new KyuubiException(s"Cannot modify the value of a static config: $k") + } + conf.setConfString(k, v) + } + try f + finally { + keys.zip(currentValues).foreach { + case (key, Some(value)) => conf.setConfString(key, value) + case (key, None) => conf.unsetConf(key) + } + } + } + + private def withTables[T](tableNames: String*)(f: => T): T = { + try { + f + } finally { + tableNames.foreach { name => + if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) { + spark.sql(s"DROP VIEW IF EXISTS $name") + } else { + spark.sql(s"DROP TABLE IF EXISTS $name") + } + } + } + } + + /** + * This method provides a reflection-based implementation of [[SQLConf.isStaticConfigKey]] to + * adapt Spark-3.1.x + * + * TODO: Once we drop support for Spark 3.1.x, we can directly call + * [[SQLConf.isStaticConfigKey()]]. 
+ */ + private def isStaticConfigKey(key: String): Boolean = + getField[JSet[String]]((SQLConf.getClass, SQLConf), "staticConfKeys").contains(key) + + // the signature of function [[ArrowConverters.fromBatchIterator]] is changed in SPARK-43528 + // (since Spark 3.5) + private lazy val fromBatchIteratorMethod = DynMethods.builder("fromBatchIterator") + .hiddenImpl( // for Spark 3.4 or previous + "org.apache.spark.sql.execution.arrow.ArrowConverters$", + classOf[Iterator[Array[Byte]]], + classOf[StructType], + classOf[String], + classOf[TaskContext]) + .hiddenImpl( // for Spark 3.5 or later + "org.apache.spark.sql.execution.arrow.ArrowConverters$", + classOf[Iterator[Array[Byte]]], + classOf[StructType], + classOf[String], + classOf[Boolean], + classOf[TaskContext]) + .build() + + def fromBatchIterator( + arrowBatchIter: Iterator[Array[Byte]], + schema: StructType, + timeZoneId: String, + errorOnDuplicatedFieldNames: JBoolean, + context: TaskContext): Iterator[InternalRow] = { + val className = "org.apache.spark.sql.execution.arrow.ArrowConverters$" + val instance = DynFields.builder().impl(className, "MODULE$").build[Object]().get(null) + if (SPARK_ENGINE_RUNTIME_VERSION >= "3.5") { + fromBatchIteratorMethod.invoke[Iterator[InternalRow]]( + instance, + arrowBatchIter, + schema, + timeZoneId, + errorOnDuplicatedFieldNames, + context) + } else { + fromBatchIteratorMethod.invoke[Iterator[InternalRow]]( + instance, + arrowBatchIter, + schema, + timeZoneId, + context) + } + } + + class JobCountListener extends SparkListener { + var numJobs = 0 + override def onJobStart(jobStart: SparkListenerJobStart): Unit = { + numJobs += 1 + } + } + + class SQLMetricsListener extends QueryExecutionListener { + var queryExecution: QueryExecution = _ + override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { + queryExecution = qe + } + override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {} } } + +case class TestData(key: Int, value: String) +case class TestData2(a: Int, b: Int) diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkCatalogDatabaseOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkCatalogDatabaseOperationSuite.scala index 46208bff1e5..5ee01bda16e 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkCatalogDatabaseOperationSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkCatalogDatabaseOperationSuite.scala @@ -22,7 +22,7 @@ import org.apache.spark.sql.util.CaseInsensitiveStringMap import org.apache.kyuubi.config.KyuubiConf.ENGINE_OPERATION_CONVERT_CATALOG_DATABASE_ENABLED import org.apache.kyuubi.engine.spark.WithSparkSQLEngine -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.operation.HiveJDBCTestHelper class SparkCatalogDatabaseOperationSuite extends WithSparkSQLEngine with HiveJDBCTestHelper { @@ -37,7 +37,7 @@ class SparkCatalogDatabaseOperationSuite extends WithSparkSQLEngine with HiveJDB test("set/get current catalog") { withJdbcStatement() { statement => val catalog = statement.getConnection.getCatalog - assert(catalog == SparkCatalogShim.SESSION_CATALOG) + assert(catalog == SparkCatalogUtils.SESSION_CATALOG) statement.getConnection.setCatalog("dummy") val changedCatalog = 
statement.getConnection.getCatalog assert(changedCatalog == "dummy") @@ -61,7 +61,7 @@ class DummyCatalog extends CatalogPlugin { _name = name } - private var _name: String = null + private var _name: String = _ override def name(): String = _name diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala index af514ceb3c0..adab0231d63 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkOperationSuite.scala @@ -32,13 +32,14 @@ import org.apache.spark.sql.catalyst.analysis.FunctionRegistry import org.apache.spark.sql.types._ import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.SemanticVersion import org.apache.kyuubi.engine.spark.WithSparkSQLEngine import org.apache.kyuubi.engine.spark.schema.SchemaHelper.TIMESTAMP_NTZ -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils +import org.apache.kyuubi.jdbc.hive.KyuubiStatement import org.apache.kyuubi.operation.{HiveMetadataTests, SparkQueryTests} import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ import org.apache.kyuubi.util.KyuubiHadoopUtils +import org.apache.kyuubi.util.SemanticVersion class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with SparkQueryTests { @@ -49,7 +50,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with withJdbcStatement() { statement => val meta = statement.getConnection.getMetaData val types = meta.getTableTypes - val expected = SparkCatalogShim.sparkTableTypes.toIterator + val expected = SparkCatalogUtils.sparkTableTypes.toIterator while (types.next()) { assert(types.getString(TABLE_TYPE) === expected.next()) } @@ -143,7 +144,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with var pos = 0 while (rowSet.next()) { - assert(rowSet.getString(TABLE_CAT) === SparkCatalogShim.SESSION_CATALOG) + assert(rowSet.getString(TABLE_CAT) === SparkCatalogUtils.SESSION_CATALOG) assert(rowSet.getString(TABLE_SCHEM) === defaultSchema) assert(rowSet.getString(TABLE_NAME) === tableName) assert(rowSet.getString(COLUMN_NAME) === schema(pos).name) @@ -201,7 +202,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val data = statement.getConnection.getMetaData val rowSet = data.getColumns("", "global_temp", viewName, null) while (rowSet.next()) { - assert(rowSet.getString(TABLE_CAT) === SparkCatalogShim.SESSION_CATALOG) + assert(rowSet.getString(TABLE_CAT) === SparkCatalogUtils.SESSION_CATALOG) assert(rowSet.getString(TABLE_SCHEM) === "global_temp") assert(rowSet.getString(TABLE_NAME) === viewName) assert(rowSet.getString(COLUMN_NAME) === "i") @@ -228,7 +229,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val data = statement.getConnection.getMetaData val rowSet = data.getColumns("", "global_temp", viewName, "n") while (rowSet.next()) { - assert(rowSet.getString(TABLE_CAT) === SparkCatalogShim.SESSION_CATALOG) + assert(rowSet.getString(TABLE_CAT) === SparkCatalogUtils.SESSION_CATALOG) assert(rowSet.getString(TABLE_SCHEM) === "global_temp") assert(rowSet.getString(TABLE_NAME) === viewName) 
assert(rowSet.getString(COLUMN_NAME) === "n") @@ -306,28 +307,28 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val tFetchResultsReq1 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_NEXT, 1) val tFetchResultsResp1 = client.FetchResults(tFetchResultsReq1) assert(tFetchResultsResp1.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq1 = tFetchResultsResp1.getResults.getColumns.get(0).getI64Val.getValues.asScala.toSeq + val idSeq1 = tFetchResultsResp1.getResults.getColumns.get(0).getI64Val.getValues.asScala assertResult(Seq(0L))(idSeq1) // fetch next from first row val tFetchResultsReq2 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_NEXT, 1) val tFetchResultsResp2 = client.FetchResults(tFetchResultsReq2) assert(tFetchResultsResp2.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq2 = tFetchResultsResp2.getResults.getColumns.get(0).getI64Val.getValues.asScala.toSeq + val idSeq2 = tFetchResultsResp2.getResults.getColumns.get(0).getI64Val.getValues.asScala assertResult(Seq(1L))(idSeq2) // fetch prior from second row, expected got first row val tFetchResultsReq3 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_PRIOR, 1) val tFetchResultsResp3 = client.FetchResults(tFetchResultsReq3) assert(tFetchResultsResp3.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq3 = tFetchResultsResp3.getResults.getColumns.get(0).getI64Val.getValues.asScala.toSeq + val idSeq3 = tFetchResultsResp3.getResults.getColumns.get(0).getI64Val.getValues.asScala assertResult(Seq(0L))(idSeq3) // fetch first val tFetchResultsReq4 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_FIRST, 3) val tFetchResultsResp4 = client.FetchResults(tFetchResultsReq4) assert(tFetchResultsResp4.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq4 = tFetchResultsResp4.getResults.getColumns.get(0).getI64Val.getValues.asScala.toSeq + val idSeq4 = tFetchResultsResp4.getResults.getColumns.get(0).getI64Val.getValues.asScala assertResult(Seq(0L, 1L))(idSeq4) } } @@ -349,7 +350,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val tFetchResultsResp1 = client.FetchResults(tFetchResultsReq1) assert(tFetchResultsResp1.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq1 = tFetchResultsResp1.getResults.getColumns.get(0) - .getI64Val.getValues.asScala.toSeq + .getI64Val.getValues.asScala assertResult(Seq(0L))(idSeq1) // fetch next from first row @@ -357,7 +358,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val tFetchResultsResp2 = client.FetchResults(tFetchResultsReq2) assert(tFetchResultsResp2.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq2 = tFetchResultsResp2.getResults.getColumns.get(0) - .getI64Val.getValues.asScala.toSeq + .getI64Val.getValues.asScala assertResult(Seq(1L))(idSeq2) // fetch prior from second row, expected got first row @@ -365,7 +366,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val tFetchResultsResp3 = client.FetchResults(tFetchResultsReq3) assert(tFetchResultsResp3.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq3 = tFetchResultsResp3.getResults.getColumns.get(0) - .getI64Val.getValues.asScala.toSeq + .getI64Val.getValues.asScala assertResult(Seq(0L))(idSeq3) // fetch first @@ -373,7 +374,7 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with val tFetchResultsResp4 = client.FetchResults(tFetchResultsReq4) 
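    // Illustrative sketch only -- not part of the patch. The fetch-orientation
    // assertions here walk a cursor over the result set: FETCH_NEXT advances
    // by maxRows, FETCH_PRIOR steps back, FETCH_FIRST rewinds to the start.
    // The `.toSeq` copies removed above are unnecessary because Scala sequence
    // equality is element-wise, so the `asScala` buffer already compares equal
    // to a plain Seq literal. A small helper in the same spirit, assuming the
    // thrift `client` and `opHandle` provided by the surrounding test
    // infrastructure, might look like this (names are hypothetical):
    object FetchSketch {
      import scala.collection.JavaConverters._
      import org.apache.hive.service.rpc.thrift.{TFetchOrientation, TFetchResultsReq, TFetchResultsResp, TOperationHandle}

      def longColumn(resp: TFetchResultsResp, col: Int = 0): Iterable[java.lang.Long] =
        resp.getResults.getColumns.get(col).getI64Val.getValues.asScala

      def fetch(
          client: org.apache.hive.service.rpc.thrift.TCLIService.Iface,
          opHandle: TOperationHandle,
          orientation: TFetchOrientation,
          maxRows: Long): TFetchResultsResp =
        client.FetchResults(new TFetchResultsReq(opHandle, orientation, maxRows))
    }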
assert(tFetchResultsResp4.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq4 = tFetchResultsResp4.getResults.getColumns.get(0) - .getI64Val.getValues.asScala.toSeq + .getI64Val.getValues.asScala assertResult(Seq(0L, 1L))(idSeq4) } } @@ -728,6 +729,14 @@ class SparkOperationSuite extends WithSparkSQLEngine with HiveMetadataTests with } } + test("KYUUBI #5030: Support get query id in Spark engine") { + withJdbcStatement() { stmt => + stmt.executeQuery("SELECT 1") + val queryId = stmt.asInstanceOf[KyuubiStatement].getQueryId + assert(queryId != null && queryId.nonEmpty) + } + } + private def whenMetaStoreURIsSetTo(uris: String)(func: String => Unit): Unit = { val conf = spark.sparkContext.hadoopConfiguration val origin = conf.get("hive.metastore.uris", "") diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/session/SessionSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/session/SessionSuite.scala index 5e0b6c28e0f..b89c560b30c 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/session/SessionSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/session/SessionSuite.scala @@ -27,7 +27,9 @@ import org.apache.kyuubi.service.ServiceState._ class SessionSuite extends WithSparkSQLEngine with HiveJDBCTestHelper { override def withKyuubiConf: Map[String, String] = { - Map(ENGINE_SHARE_LEVEL.key -> "CONNECTION") + Map( + ENGINE_SHARE_LEVEL.key -> "CONNECTION", + ENGINE_SPARK_MAX_INITIAL_WAIT.key -> "0") } override protected def beforeEach(): Unit = { diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunctionSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunctionSuite.scala index f355e1e6b51..7a3f8c94071 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunctionSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/udf/KyuubiDefinedFunctionSuite.scala @@ -19,24 +19,23 @@ package org.apache.kyuubi.engine.spark.udf import java.nio.file.Paths -import org.apache.kyuubi.{KyuubiFunSuite, MarkdownBuilder, MarkdownUtils, Utils} +import org.apache.kyuubi.{KyuubiFunSuite, MarkdownBuilder, Utils} +import org.apache.kyuubi.util.GoldenFileUtils._ -// scalastyle:off line.size.limit /** * End-to-end test cases for configuration doc file - * The golden result file is "docs/sql/functions.md". + * The golden result file is "docs/extensions/engines/spark/functions.md". 
* * To run the entire test suite: * {{{ - * build/mvn clean test -pl externals/kyuubi-spark-sql-engine -am -Pflink-provided,spark-provided,hive-provided -DwildcardSuites=org.apache.kyuubi.engine.spark.udf.KyuubiDefinedFunctionSuite + * KYUUBI_UPDATE=0 dev/gen/gen_spark_kdf_docs.sh * }}} * * To re-generate golden files for entire suite, run: * {{{ - * KYUUBI_UPDATE=1 build/mvn clean test -pl externals/kyuubi-spark-sql-engine -am -Pflink-provided,spark-provided,hive-provided -DwildcardSuites=org.apache.kyuubi.engine.spark.udf.KyuubiDefinedFunctionSuite + * dev/gen/gen_spark_kdf_docs.sh * }}} */ -// scalastyle:on line.size.limit class KyuubiDefinedFunctionSuite extends KyuubiFunSuite { private val kyuubiHome: String = Utils.getCodeSourceLocation(getClass) @@ -48,24 +47,18 @@ class KyuubiDefinedFunctionSuite extends KyuubiFunSuite { test("verify or update kyuubi spark sql functions") { val builder = MarkdownBuilder(licenced = true, getClass.getName) - builder - .line("# Auxiliary SQL Functions") - .line("""Kyuubi provides several auxiliary SQL functions as supplement to Spark's + builder += "# Auxiliary SQL Functions" += + """Kyuubi provides several auxiliary SQL functions as supplement to Spark's | [Built-in Functions](https://spark.apache.org/docs/latest/api/sql/index.html# - |built-in-functions)""") - .lines(""" + |built-in-functions)""" ++= + """ | Name | Description | Return Type | Since | --- | --- | --- | --- - | - |""") + |""" KDFRegistry.registeredFunctions.foreach { func => - builder.line(s"${func.name} | ${func.description} | ${func.returnType} | ${func.since}") + builder += s"${func.name} | ${func.description} | ${func.returnType} | ${func.since}" } - MarkdownUtils.verifyOutput( - markdown, - builder, - getClass.getCanonicalName, - "externals/kyuubi-spark-sql-engine") + verifyOrRegenerateGoldenFile(markdown, builder.toMarkdown, "dev/gen/gen_spark_kdf_docs.sh") } } diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/jdbc/KyuubiHiveDriverSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/jdbc/KyuubiHiveDriverSuite.scala index 4d3c754980d..ae68440df3e 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/jdbc/KyuubiHiveDriverSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/jdbc/KyuubiHiveDriverSuite.scala @@ -22,7 +22,7 @@ import java.util.Properties import org.apache.kyuubi.IcebergSuiteMixin import org.apache.kyuubi.engine.spark.WithSparkSQLEngine -import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim +import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils import org.apache.kyuubi.jdbc.hive.{KyuubiConnection, KyuubiStatement} import org.apache.kyuubi.tags.IcebergTest @@ -47,15 +47,15 @@ class KyuubiHiveDriverSuite extends WithSparkSQLEngine with IcebergSuiteMixin { val metaData = connection.getMetaData assert(metaData.getClass.getName === "org.apache.kyuubi.jdbc.hive.KyuubiDatabaseMetaData") val statement = connection.createStatement() - val table1 = s"${SparkCatalogShim.SESSION_CATALOG}.default.kyuubi_hive_jdbc" + val table1 = s"${SparkCatalogUtils.SESSION_CATALOG}.default.kyuubi_hive_jdbc" val table2 = s"$catalog.default.hdp_cat_tbl" try { statement.execute(s"CREATE TABLE $table1(key int) USING parquet") statement.execute(s"CREATE TABLE $table2(key int) USING $format") - val resultSet1 = metaData.getTables(SparkCatalogShim.SESSION_CATALOG, "default", "%", null) + val resultSet1 = metaData.getTables(SparkCatalogUtils.SESSION_CATALOG, "default", "%", 
null) assert(resultSet1.next()) - assert(resultSet1.getString(1) === SparkCatalogShim.SESSION_CATALOG) + assert(resultSet1.getString(1) === SparkCatalogUtils.SESSION_CATALOG) assert(resultSet1.getString(2) === "default") assert(resultSet1.getString(3) === "kyuubi_hive_jdbc") diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala index 8293123ead7..1b662eadf96 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala @@ -27,4 +27,6 @@ object KyuubiSparkContextHelper { def waitListenerBus(spark: SparkSession): Unit = { spark.sparkContext.listenerBus.waitUntilEmpty() } + + def dummyTaskContext(): TaskContextImpl = TaskContext.empty() } diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/kyuubi/SparkSQLEngineDeregisterSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/kyuubi/SparkSQLEngineDeregisterSuite.scala index 8dc93759b93..4dddcd4eef3 100644 --- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/kyuubi/SparkSQLEngineDeregisterSuite.scala +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/kyuubi/SparkSQLEngineDeregisterSuite.scala @@ -24,9 +24,8 @@ import org.apache.spark.sql.internal.SQLConf.ANSI_ENABLED import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.config.KyuubiConf._ -import org.apache.kyuubi.engine.spark.KyuubiSparkUtil.sparkMajorMinorVersion -import org.apache.kyuubi.engine.spark.WithDiscoverySparkSQLEngine -import org.apache.kyuubi.engine.spark.WithEmbeddedZookeeper +import org.apache.kyuubi.engine.spark.{WithDiscoverySparkSQLEngine, WithEmbeddedZookeeper} +import org.apache.kyuubi.engine.spark.KyuubiSparkUtil.SPARK_ENGINE_RUNTIME_VERSION import org.apache.kyuubi.service.ServiceState abstract class SparkSQLEngineDeregisterSuite @@ -61,10 +60,11 @@ abstract class SparkSQLEngineDeregisterSuite class SparkSQLEngineDeregisterExceptionSuite extends SparkSQLEngineDeregisterSuite { override def withKyuubiConf: Map[String, String] = { super.withKyuubiConf ++ Map(ENGINE_DEREGISTER_EXCEPTION_CLASSES.key -> { - sparkMajorMinorVersion match { + if (SPARK_ENGINE_RUNTIME_VERSION >= "3.3") { // see https://issues.apache.org/jira/browse/SPARK-35958 - case (3, minor) if minor > 2 => "org.apache.spark.SparkArithmeticException" - case _ => classOf[ArithmeticException].getCanonicalName + "org.apache.spark.SparkArithmeticException" + } else { + classOf[ArithmeticException].getCanonicalName } }) @@ -94,10 +94,11 @@ class SparkSQLEngineDeregisterExceptionTTLSuite zookeeperConf ++ Map( ANSI_ENABLED.key -> "true", ENGINE_DEREGISTER_EXCEPTION_CLASSES.key -> { - sparkMajorMinorVersion match { + if (SPARK_ENGINE_RUNTIME_VERSION >= "3.3") { // see https://issues.apache.org/jira/browse/SPARK-35958 - case (3, minor) if minor > 2 => "org.apache.spark.SparkArithmeticException" - case _ => classOf[ArithmeticException].getCanonicalName + "org.apache.spark.SparkArithmeticException" + } else { + classOf[ArithmeticException].getCanonicalName } }, ENGINE_DEREGISTER_JOB_MAX_FAILURES.key -> maxJobFailures.toString, diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/sql/execution/metric/SparkMetricsTestUtils.scala 
b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/sql/execution/metric/SparkMetricsTestUtils.scala new file mode 100644 index 00000000000..7ab06f0ef18 --- /dev/null +++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/sql/execution/metric/SparkMetricsTestUtils.scala @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.metric + +import org.apache.spark.sql.DataFrame +import org.apache.spark.sql.execution.SparkPlanInfo +import org.apache.spark.sql.execution.ui.SparkPlanGraph +import org.apache.spark.sql.kyuubi.SparkDatasetHelper + +import org.apache.kyuubi.engine.spark.WithSparkSQLEngine + +trait SparkMetricsTestUtils { + this: WithSparkSQLEngine => + + private lazy val statusStore = spark.sharedState.statusStore + private def currentExecutionIds(): Set[Long] = { + spark.sparkContext.listenerBus.waitUntilEmpty(10000) + statusStore.executionsList.map(_.executionId).toSet + } + + protected def getSparkPlanMetrics(df: DataFrame): Map[Long, (String, Map[String, Any])] = { + val previousExecutionIds = currentExecutionIds() + SparkDatasetHelper.executeCollect(df) + spark.sparkContext.listenerBus.waitUntilEmpty(10000) + val executionIds = currentExecutionIds().diff(previousExecutionIds) + assert(executionIds.size === 1) + val executionId = executionIds.head + val metricValues = statusStore.executionMetrics(executionId) + SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan)).allNodes + .map { node => + val nodeMetrics = node.metrics.map { metric => + val metricValue = metricValues(metric.accumulatorId) + (metric.name, metricValue) + }.toMap + (node.id, node.name -> nodeMetrics) + }.toMap + } +} diff --git a/externals/kyuubi-trino-engine/pom.xml b/externals/kyuubi-trino-engine/pom.xml index 7aea8f33a6f..7d91e4a864f 100644 --- a/externals/kyuubi-trino-engine/pom.xml +++ b/externals/kyuubi-trino-engine/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../../pom.xml - kyuubi-trino-engine_2.12 + kyuubi-trino-engine_${scala.binary.version} jar Kyuubi Project Engine Trino https://kyuubi.apache.org/ diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/ExecuteStatement.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/ExecuteStatement.scala index eb1b273007d..3e7cce80cdf 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/ExecuteStatement.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/ExecuteStatement.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.trino.operation import 
java.util.concurrent.RejectedExecutionException -import org.apache.hive.service.rpc.thrift.TRowSet +import org.apache.hive.service.rpc.thrift.TFetchResultsResp import org.apache.kyuubi.{KyuubiSQLException, Logging} import org.apache.kyuubi.engine.trino.TrinoStatement @@ -82,7 +82,9 @@ class ExecuteStatement( } } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) @@ -97,7 +99,10 @@ class ExecuteStatement( val taken = iter.take(rowSetSize) val resultRowSet = RowSet.toTRowSet(taken.toList, schema, getProtocolVersion) resultRowSet.setStartRowOffset(iter.getPosition) - resultRowSet + val fetchResultsResp = new TFetchResultsResp(OK_STATUS) + fetchResultsResp.setResults(resultRowSet) + fetchResultsResp.setHasMoreRows(false) + fetchResultsResp } private def executeStatement(trinoStatement: TrinoStatement): Unit = { diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala index 3d8c7fd6c5b..504a53a4149 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala @@ -23,11 +23,16 @@ import io.trino.client.ClientStandardTypes.VARCHAR import io.trino.client.ClientTypeSignature.VARCHAR_UNBOUNDED_LENGTH import org.apache.kyuubi.operation.IterableFetchIterator +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class GetCurrentCatalog(session: Session) extends TrinoOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { val session = trinoContext.clientSession.get diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala index 3bf2987b46a..3ab598ef09e 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala @@ -23,11 +23,16 @@ import io.trino.client.ClientStandardTypes.VARCHAR import io.trino.client.ClientTypeSignature.VARCHAR_UNBOUNDED_LENGTH import org.apache.kyuubi.operation.IterableFetchIterator +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class GetCurrentDatabase(session: Session) extends TrinoOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { val session = trinoContext.clientSession.get diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala 
b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala index 09ba4262f70..16836b0a97d 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala @@ -19,11 +19,16 @@ package org.apache.kyuubi.engine.trino.operation import io.trino.client.ClientSession +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentCatalog(session: Session, catalog: String) extends TrinoOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { val session = trinoContext.clientSession.get diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala index f25cc9e0c6d..aa4697f5f0e 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala @@ -19,11 +19,16 @@ package org.apache.kyuubi.engine.trino.operation import io.trino.client.ClientSession +import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.session.Session class SetCurrentDatabase(session: Session, database: String) extends TrinoOperation(session) { + private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle) + + override def getOperationLog: Option[OperationLog] = Option(operationLog) + override protected def runInternal(): Unit = { try { val session = trinoContext.clientSession.get diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperation.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperation.scala index 6e40f65f290..11eaa1bc1d7 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperation.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperation.scala @@ -21,7 +21,7 @@ import java.io.IOException import io.trino.client.Column import io.trino.client.StatementClient -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp} import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.Utils @@ -54,7 +54,9 @@ abstract class TrinoOperation(session: Session) extends AbstractOperation(sessio resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) @@ -66,7 +68,10 @@ abstract class TrinoOperation(session: Session) extends AbstractOperation(sessio val taken = iter.take(rowSetSize) val resultRowSet = RowSet.toTRowSet(taken.toList, schema, getProtocolVersion) resultRowSet.setStartRowOffset(iter.getPosition) - resultRowSet + val 
resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(resultRowSet) + resp.setHasMoreRows(false) + resp } override protected def beforeRun(): Unit = { @@ -75,7 +80,7 @@ abstract class TrinoOperation(session: Session) extends AbstractOperation(sessio } override protected def afterRun(): Unit = { - state.synchronized { + withLockRequired { if (!isTerminalState(state)) { setState(OperationState.FINISHED) } @@ -108,7 +113,7 @@ abstract class TrinoOperation(session: Session) extends AbstractOperation(sessio // could be thrown. case e: Throwable => if (cancel && trino.isRunning) trino.cancelLeafStage() - state.synchronized { + withLockRequired { val errMsg = Utils.stringifyException(e) if (state == OperationState.TIMEOUT) { val ke = KyuubiSQLException(s"Timeout operating $opType: $errMsg") diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala index 81f973b1b5e..362ee3ed06a 100644 --- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala +++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala @@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit import io.airlift.units.Duration import io.trino.client.ClientSession +import io.trino.client.OkHttpUtil import okhttp3.OkHttpClient import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion} @@ -35,7 +36,7 @@ import org.apache.kyuubi.engine.trino.{TrinoConf, TrinoContext, TrinoStatement} import org.apache.kyuubi.engine.trino.event.TrinoSessionEvent import org.apache.kyuubi.events.EventBus import org.apache.kyuubi.operation.{Operation, OperationHandle} -import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager} +import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager, USE_CATALOG, USE_DATABASE} class TrinoSessionImpl( protocol: TProtocolVersion, @@ -46,50 +47,51 @@ class TrinoSessionImpl( sessionManager: SessionManager) extends AbstractSession(protocol, user, password, ipAddress, conf, sessionManager) { + val sessionConf: KyuubiConf = sessionManager.getConf + override val handle: SessionHandle = conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).getOrElse(SessionHandle()) + private val username: String = sessionConf + .getOption(KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY).getOrElse(currentUser) + var trinoContext: TrinoContext = _ private var clientSession: ClientSession = _ - private var catalogName: String = null - private var databaseName: String = null - + private var catalogName: String = _ + private var databaseName: String = _ private val sessionEvent = TrinoSessionEvent(this) override def open(): Unit = { - normalizedConf.foreach { - case ("use:catalog", catalog) => catalogName = catalog - case ("use:database", database) => databaseName = database - case _ => // do nothing + + val (useCatalogAndDatabaseConf, _) = normalizedConf.partition { case (k, _) => + Array(USE_CATALOG, USE_DATABASE).contains(k) } - val httpClient = new OkHttpClient.Builder().build() + useCatalogAndDatabaseConf.foreach { + case (USE_CATALOG, catalog) => catalogName = catalog + case (USE_DATABASE, database) => databaseName = database + } + if (catalogName == null) { + catalogName = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_CATALOG) + .getOrElse(throw KyuubiSQLException("Trino default catalog 
can not be null!")) + } clientSession = createClientSession() - trinoContext = TrinoContext(httpClient, clientSession) + trinoContext = TrinoContext(createHttpClient(), clientSession) super.open() EventBus.post(sessionEvent) } private def createClientSession(): ClientSession = { - val sessionConf = sessionManager.getConf val connectionUrl = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_URL).getOrElse( throw KyuubiSQLException("Trino server url can not be null!")) - if (catalogName == null) { - catalogName = sessionConf.get( - KyuubiConf.ENGINE_TRINO_CONNECTION_CATALOG).getOrElse( - throw KyuubiSQLException("Trino default catalog can not be null!")) - } - - val user = sessionConf - .getOption(KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY).getOrElse(currentUser) val clientRequestTimeout = sessionConf.get(TrinoConf.CLIENT_REQUEST_TIMEOUT) new ClientSession( URI.create(connectionUrl), - user, + username, Optional.empty(), "kyuubi", Optional.empty(), @@ -110,6 +112,37 @@ class TrinoSessionImpl( true) } + private def createHttpClient(): OkHttpClient = { + val keystorePath = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_KEYSTORE_PATH) + val keystorePassword = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_KEYSTORE_PASSWORD) + val keystoreType = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_KEYSTORE_TYPE) + val truststorePath = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_TRUSTSTORE_PATH) + val truststorePassword = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_TRUSTSTORE_PASSWORD) + val truststoreType = sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_TRUSTSTORE_TYPE) + + val serverScheme = clientSession.getServer.getScheme + + val builder = new OkHttpClient.Builder() + + OkHttpUtil.setupSsl( + builder, + Optional.ofNullable(keystorePath.orNull), + Optional.ofNullable(keystorePassword.orNull), + Optional.ofNullable(keystoreType.orNull), + Optional.ofNullable(truststorePath.orNull), + Optional.ofNullable(truststorePassword.orNull), + Optional.ofNullable(truststoreType.orNull)) + + sessionConf.get(KyuubiConf.ENGINE_TRINO_CONNECTION_PASSWORD).foreach { password => + require( + serverScheme.equalsIgnoreCase("https"), + "Trino engine using username/password requires HTTPS to be enabled") + builder.addInterceptor(OkHttpUtil.basicAuth(username, password)) + } + + builder.build() + } + override protected def runOperation(operation: Operation): OperationHandle = { sessionEvent.totalOperations += 1 super.runOperation(operation) diff --git a/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/TrinoStatementSuite.scala b/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/TrinoStatementSuite.scala index fc9f1af5f79..dec753ad4f6 100644 --- a/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/TrinoStatementSuite.scala +++ b/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/TrinoStatementSuite.scala @@ -30,15 +30,15 @@ class TrinoStatementSuite extends WithTrinoContainerServer { assert(schema.size === 1) assert(schema(0).getName === "_col0") - assert(resultSet.toIterator.hasNext) - assert(resultSet.toIterator.next() === List(1)) + assert(resultSet.hasNext) + assert(resultSet.next() === List(1)) val trinoStatement2 = TrinoStatement(trinoContext, kyuubiConf, "show schemas") val schema2 = trinoStatement2.getColumns val resultSet2 = trinoStatement2.execute() assert(schema2.size === 1) - assert(resultSet2.toIterator.hasNext) + assert(resultSet2.hasNext) } } diff --git 
a/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperationSuite.scala b/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperationSuite.scala index a6f125af52c..90939a3e4e0 100644 --- a/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperationSuite.scala +++ b/externals/kyuubi-trino-engine/src/test/scala/org/apache/kyuubi/engine/trino/operation/TrinoOperationSuite.scala @@ -590,14 +590,14 @@ class TrinoOperationSuite extends WithTrinoEngine with TrinoQueryTests { val tFetchResultsReq1 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_NEXT, 1) val tFetchResultsResp1 = client.FetchResults(tFetchResultsReq1) assert(tFetchResultsResp1.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq1 = tFetchResultsResp1.getResults.getColumns.get(0).getI32Val.getValues.asScala.toSeq + val idSeq1 = tFetchResultsResp1.getResults.getColumns.get(0).getI32Val.getValues.asScala assertResult(Seq(0L))(idSeq1) // fetch next from first row val tFetchResultsReq2 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_NEXT, 1) val tFetchResultsResp2 = client.FetchResults(tFetchResultsReq2) assert(tFetchResultsResp2.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - val idSeq2 = tFetchResultsResp2.getResults.getColumns.get(0).getI32Val.getValues.asScala.toSeq + val idSeq2 = tFetchResultsResp2.getResults.getColumns.get(0).getI32Val.getValues.asScala assertResult(Seq(1L))(idSeq2) val tFetchResultsReq3 = new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_PRIOR, 1) @@ -607,7 +607,7 @@ class TrinoOperationSuite extends WithTrinoEngine with TrinoQueryTests { } else { assert(tFetchResultsResp3.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq3 = - tFetchResultsResp3.getResults.getColumns.get(0).getI32Val.getValues.asScala.toSeq + tFetchResultsResp3.getResults.getColumns.get(0).getI32Val.getValues.asScala assertResult(Seq(0L))(idSeq3) } @@ -618,7 +618,7 @@ class TrinoOperationSuite extends WithTrinoEngine with TrinoQueryTests { } else { assert(tFetchResultsResp4.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) val idSeq4 = - tFetchResultsResp4.getResults.getColumns.get(0).getI32Val.getValues.asScala.toSeq + tFetchResultsResp4.getResults.getColumns.get(0).getI32Val.getValues.asScala assertResult(Seq(0L, 1L))(idSeq4) } } @@ -771,8 +771,8 @@ class TrinoOperationSuite extends WithTrinoEngine with TrinoQueryTests { assert(schema.size === 1) assert(schema(0).getName === "_col0") - assert(resultSet.toIterator.hasNext) - version = resultSet.toIterator.next().head.toString + assert(resultSet.hasNext) + version = resultSet.next().head.toString } version } diff --git a/integration-tests/kyuubi-flink-it/pom.xml b/integration-tests/kyuubi-flink-it/pom.xml index 8ccc14e5e93..3c0e3f31a7c 100644 --- a/integration-tests/kyuubi-flink-it/pom.xml +++ b/integration-tests/kyuubi-flink-it/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-flink-it_2.12 + kyuubi-flink-it_${scala.binary.version} Kyuubi Test Flink SQL IT https://kyuubi.apache.org/ @@ -75,7 +75,38 @@ org.apache.flink - flink-table-runtime${flink.module.scala.suffix} + flink-table-runtime + test + + + + + org.apache.hadoop + hadoop-client-minicluster + test + + + + org.bouncycastle + bcprov-jdk15on + test + + + + org.bouncycastle + bcpkix-jdk15on + test + + + + jakarta.activation + jakarta.activation-api + test + + + + jakarta.xml.bind + 
jakarta.xml.bind-api test diff --git a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala new file mode 100644 index 00000000000..de9a8ae2d28 --- /dev/null +++ b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.it.flink + +import java.io.{File, FileWriter} +import java.nio.file.Paths + +import org.apache.hadoop.yarn.conf.YarnConfiguration + +import org.apache.kyuubi.{KyuubiFunSuite, Utils, WithKyuubiServer} +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.KYUUBI_ENGINE_ENV_PREFIX +import org.apache.kyuubi.server.{MiniDFSService, MiniYarnService} + +trait WithKyuubiServerAndYarnMiniCluster extends KyuubiFunSuite with WithKyuubiServer { + + val kyuubiHome: String = Utils.getCodeSourceLocation(getClass).split("integration-tests").head + + override protected val conf: KyuubiConf = new KyuubiConf(false) + + protected var miniHdfsService: MiniDFSService = _ + + protected var miniYarnService: MiniYarnService = _ + + private val yarnConf: YarnConfiguration = { + val yarnConfig = new YarnConfiguration() + + // configurations copied from org.apache.flink.yarn.YarnTestBase + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 32) + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096) + + yarnConfig.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true) + yarnConfig.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2) + yarnConfig.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2) + yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4) + yarnConfig.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600) + yarnConfig.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false) + // memory is overwritten in the MiniYARNCluster. + // so we have to change the number of cores for testing. 
+ yarnConfig.setInt(YarnConfiguration.NM_VCORES, 666) + yarnConfig.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 99.0f) + yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000) + yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 5000) + + // capacity-scheduler.xml is missing in hadoop-client-minicluster so this is a workaround + yarnConfig.set("yarn.scheduler.capacity.root.queues", "default,four_cores_queue") + + yarnConfig.setInt("yarn.scheduler.capacity.root.default.capacity", 100) + yarnConfig.setFloat("yarn.scheduler.capacity.root.default.user-limit-factor", 1) + yarnConfig.setInt("yarn.scheduler.capacity.root.default.maximum-capacity", 100) + yarnConfig.set("yarn.scheduler.capacity.root.default.state", "RUNNING") + yarnConfig.set("yarn.scheduler.capacity.root.default.acl_submit_applications", "*") + yarnConfig.set("yarn.scheduler.capacity.root.default.acl_administer_queue", "*") + + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-capacity", 100) + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-applications", 10) + yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-allocation-vcores", 4) + yarnConfig.setFloat("yarn.scheduler.capacity.root.four_cores_queue.user-limit-factor", 1) + yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_submit_applications", "*") + yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_administer_queue", "*") + + yarnConfig.setInt("yarn.scheduler.capacity.node-locality-delay", -1) + // Set bind host to localhost to avoid java.net.BindException + yarnConfig.set(YarnConfiguration.RM_BIND_HOST, "localhost") + yarnConfig.set(YarnConfiguration.NM_BIND_HOST, "localhost") + + yarnConfig + } + + override def beforeAll(): Unit = { + miniHdfsService = new MiniDFSService() + miniHdfsService.initialize(conf) + miniHdfsService.start() + + val hdfsServiceUrl = s"hdfs://localhost:${miniHdfsService.getDFSPort}" + yarnConf.set("fs.defaultFS", hdfsServiceUrl) + yarnConf.addResource(miniHdfsService.getHadoopConf) + + val cp = System.getProperty("java.class.path") + // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory + // which can't be initialized on the client side + val hadoopJars = cp.split(":").filter(s => !s.contains("flink")) + val hadoopClasspath = hadoopJars.mkString(":") + yarnConf.set("yarn.application.classpath", hadoopClasspath) + + miniYarnService = new MiniYarnService() + miniYarnService.setYarnConf(yarnConf) + miniYarnService.initialize(conf) + miniYarnService.start() + + val hadoopConfDir = Utils.createTempDir().toFile + val writer = new FileWriter(new File(hadoopConfDir, "core-site.xml")) + yarnConf.writeXml(writer) + writer.close() + + val flinkHome = { + val candidates = Paths.get(kyuubiHome, "externals", "kyuubi-download", "target") + .toFile.listFiles(f => f.getName.contains("flink")) + if (candidates == null) None else candidates.map(_.toPath).headOption + } + if (flinkHome.isEmpty) { + throw new IllegalStateException(s"Flink home not found in $kyuubiHome/externals") + } + + conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.KYUUBI_HOME", kyuubiHome) + conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.FLINK_HOME", flinkHome.get.toString) + conf.set( + s"$KYUUBI_ENGINE_ENV_PREFIX.FLINK_CONF_DIR", + s"${flinkHome.get.toString}${File.separator}conf") + conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.HADOOP_CLASSPATH", hadoopClasspath) + conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.HADOOP_CONF_DIR", 
hadoopConfDir.getAbsolutePath) + conf.set(s"flink.containerized.master.env.HADOOP_CLASSPATH", hadoopClasspath) + conf.set(s"flink.containerized.master.env.HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath) + conf.set(s"flink.containerized.taskmanager.env.HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath) + + super.beforeAll() + } + + override def afterAll(): Unit = { + super.afterAll() + if (miniYarnService != null) { + miniYarnService.stop() + miniYarnService = null + } + if (miniHdfsService != null) { + miniHdfsService.stop() + miniHdfsService = null + } + } +} diff --git a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuite.scala b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuite.scala index 893e0020a6a..55476bfd003 100644 --- a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuite.scala +++ b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuite.scala @@ -31,7 +31,7 @@ class FlinkOperationSuite extends WithKyuubiServerAndFlinkMiniCluster override val conf: KyuubiConf = KyuubiConf() .set(s"$KYUUBI_ENGINE_ENV_PREFIX.$KYUUBI_HOME", kyuubiHome) .set(ENGINE_TYPE, "FLINK_SQL") - .set("flink.parallelism.default", "6") + .set("flink.parallelism.default", "2") override protected def jdbcUrl: String = getJdbcUrl @@ -72,7 +72,7 @@ class FlinkOperationSuite extends WithKyuubiServerAndFlinkMiniCluster var success = false while (resultSet.next() && !success) { if (resultSet.getString(1) == "parallelism.default" && - resultSet.getString(2) == "6") { + resultSet.getString(2) == "2") { success = true } } diff --git a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala new file mode 100644 index 00000000000..ee6b9bb98ea --- /dev/null +++ b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.it.flink.operation + +import org.apache.hive.service.rpc.thrift.{TGetInfoReq, TGetInfoType} + +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.it.flink.WithKyuubiServerAndYarnMiniCluster +import org.apache.kyuubi.operation.HiveJDBCTestHelper +import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT + +class FlinkOperationSuiteOnYarn extends WithKyuubiServerAndYarnMiniCluster + with HiveJDBCTestHelper { + + override protected def jdbcUrl: String = { + // delay the access to thrift service because the thrift service + // may not be ready although it's registered + Thread.sleep(3000L) + getJdbcUrl + } + + override def beforeAll(): Unit = { + conf + .set(s"$KYUUBI_ENGINE_ENV_PREFIX.$KYUUBI_HOME", kyuubiHome) + .set(ENGINE_TYPE, "FLINK_SQL") + .set("flink.execution.target", "yarn-application") + .set("flink.parallelism.default", "2") + super.beforeAll() + } + + test("get catalogs for flink sql") { + withJdbcStatement() { statement => + val meta = statement.getConnection.getMetaData + val catalogs = meta.getCatalogs + val expected = Set("default_catalog").toIterator + while (catalogs.next()) { + assert(catalogs.getString(TABLE_CAT) === expected.next()) + } + assert(!expected.hasNext) + assert(!catalogs.next()) + } + } + + test("execute statement - create/alter/drop table") { + withJdbcStatement() { statement => + statement.executeQuery("create table tbl_a (a string) with ('connector' = 'blackhole')") + assert(statement.execute("alter table tbl_a rename to tbl_b")) + assert(statement.execute("drop table tbl_b")) + } + } + + test("execute statement - select column name with dots") { + withJdbcStatement() { statement => + val resultSet = statement.executeQuery("select 'tmp.hello'") + assert(resultSet.next()) + assert(resultSet.getString(1) === "tmp.hello") + } + } + + test("set kyuubi conf into flink conf") { + withJdbcStatement() { statement => + val resultSet = statement.executeQuery("SET") + // Flink does not support set key without value currently, + // thus read all rows to find the desired one + var success = false + while (resultSet.next() && !success) { + if (resultSet.getString(1) == "parallelism.default" && + resultSet.getString(2) == "2") { + success = true + } + } + assert(success) + } + } + + test("server info provider - server") { + withSessionConf(Map(KyuubiConf.SERVER_INFO_PROVIDER.key -> "SERVER"))()() { + withSessionHandle { (client, handle) => + val req = new TGetInfoReq() + req.setSessionHandle(handle) + req.setInfoType(TGetInfoType.CLI_DBMS_NAME) + assert(client.GetInfo(req).getInfoValue.getStringValue === "Apache Kyuubi") + } + } + } + + test("server info provider - engine") { + withSessionConf(Map(KyuubiConf.SERVER_INFO_PROVIDER.key -> "ENGINE"))()() { + withSessionHandle { (client, handle) => + val req = new TGetInfoReq() + req.setSessionHandle(handle) + req.setInfoType(TGetInfoType.CLI_DBMS_NAME) + assert(client.GetInfo(req).getInfoValue.getStringValue === "Apache Flink") + } + } + } +} diff --git a/integration-tests/kyuubi-hive-it/pom.xml b/integration-tests/kyuubi-hive-it/pom.xml index ff9a6b35ea6..24e5529a2d3 100644 --- a/integration-tests/kyuubi-hive-it/pom.xml +++ b/integration-tests/kyuubi-hive-it/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-hive-it_2.12 + kyuubi-hive-it_${scala.binary.version} Kyuubi Test Hive IT https://kyuubi.apache.org/ diff --git 
a/integration-tests/kyuubi-hive-it/src/test/scala/org/apache/kyuubi/it/hive/operation/KyuubiOperationHiveEnginePerUserSuite.scala b/integration-tests/kyuubi-hive-it/src/test/scala/org/apache/kyuubi/it/hive/operation/KyuubiOperationHiveEnginePerUserSuite.scala index a4e6bb150b9..07e2bc0f2c7 100644 --- a/integration-tests/kyuubi-hive-it/src/test/scala/org/apache/kyuubi/it/hive/operation/KyuubiOperationHiveEnginePerUserSuite.scala +++ b/integration-tests/kyuubi-hive-it/src/test/scala/org/apache/kyuubi/it/hive/operation/KyuubiOperationHiveEnginePerUserSuite.scala @@ -61,4 +61,21 @@ class KyuubiOperationHiveEnginePerUserSuite extends WithKyuubiServer with HiveEn } } } + + test("kyuubi defined function - system_user, session_user") { + withJdbcStatement("hive_engine_test") { statement => + val rs = statement.executeQuery("SELECT system_user(), session_user()") + assert(rs.next()) + assert(rs.getString(1) === Utils.currentUser) + assert(rs.getString(2) === Utils.currentUser) + } + } + + test("kyuubi defined function - engine_id") { + withJdbcStatement("hive_engine_test") { statement => + val rs = statement.executeQuery("SELECT engine_id()") + assert(rs.next()) + assert(rs.getString(1).nonEmpty) + } + } } diff --git a/integration-tests/kyuubi-jdbc-it/pom.xml b/integration-tests/kyuubi-jdbc-it/pom.xml index 2d95de78ed8..08f74512e90 100644 --- a/integration-tests/kyuubi-jdbc-it/pom.xml +++ b/integration-tests/kyuubi-jdbc-it/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-jdbc-it_2.12 + kyuubi-jdbc-it_${scala.binary.version} Kyuubi Test Jdbc IT https://kyuubi.apache.org/ diff --git a/integration-tests/kyuubi-kubernetes-it/pom.xml b/integration-tests/kyuubi-kubernetes-it/pom.xml index ef56a770fb6..a4334e497c7 100644 --- a/integration-tests/kyuubi-kubernetes-it/pom.xml +++ b/integration-tests/kyuubi-kubernetes-it/pom.xml @@ -15,17 +15,15 @@ ~ See the License for the specific language governing permissions and ~ limitations under the License. 
--> - - + 4.0.0 org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - 4.0.0 kubernetes-integration-tests_2.12 Kyuubi Test Kubernetes IT @@ -62,12 +60,6 @@ test - - io.fabric8 - kubernetes-client - test - - org.apache.hadoop hadoop-client-minicluster diff --git a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/MiniKube.scala b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/MiniKube.scala index cd373873a6a..f4cd557bb0f 100644 --- a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/MiniKube.scala +++ b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/MiniKube.scala @@ -17,7 +17,11 @@ package org.apache.kyuubi.kubernetes.test -import io.fabric8.kubernetes.client.{Config, DefaultKubernetesClient} +import io.fabric8.kubernetes.client.{Config, KubernetesClient, KubernetesClientBuilder} +import io.fabric8.kubernetes.client.okhttp.OkHttpClientFactory +import okhttp3.{Dispatcher, OkHttpClient} + +import org.apache.kyuubi.util.ThreadUtils /** * This code copied from Aapache Spark @@ -44,7 +48,7 @@ object MiniKube { executeMinikube(true, "ip").head } - def getKubernetesClient: DefaultKubernetesClient = { + def getKubernetesClient: KubernetesClient = { // only the three-part version number is matched (the optional suffix like "-beta.0" is dropped) val versionArrayOpt = "\\d+\\.\\d+\\.\\d+".r .findFirstIn(minikubeVersionString.split(VERSION_PREFIX)(1)) @@ -65,7 +69,18 @@ object MiniKube { "For minikube version a three-part version number is expected (the optional " + "non-numeric suffix is intentionally dropped)") } + // https://github.com/fabric8io/kubernetes-client/issues/3547 + val dispatcher = new Dispatcher( + ThreadUtils.newDaemonCachedThreadPool("kubernetes-dispatcher")) + val factoryWithCustomDispatcher = new OkHttpClientFactory() { + override protected def additionalConfig(builder: OkHttpClient.Builder): Unit = { + builder.dispatcher(dispatcher) + } + } - new DefaultKubernetesClient(Config.autoConfigure("minikube")) + new KubernetesClientBuilder() + .withConfig(Config.autoConfigure("minikube")) + .withHttpClientFactory(factoryWithCustomDispatcher) + .build() } } diff --git a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/WithKyuubiServerOnKubernetes.scala b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/WithKyuubiServerOnKubernetes.scala index ed9cbce09fe..595fdd4314e 100644 --- a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/WithKyuubiServerOnKubernetes.scala +++ b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/WithKyuubiServerOnKubernetes.scala @@ -18,14 +18,14 @@ package org.apache.kyuubi.kubernetes.test import io.fabric8.kubernetes.api.model.Pod -import io.fabric8.kubernetes.client.DefaultKubernetesClient +import io.fabric8.kubernetes.client.KubernetesClient import org.apache.kyuubi.KyuubiFunSuite trait WithKyuubiServerOnKubernetes extends KyuubiFunSuite { protected def connectionConf: Map[String, String] = Map.empty - lazy val miniKubernetesClient: DefaultKubernetesClient = MiniKube.getKubernetesClient + lazy val miniKubernetesClient: KubernetesClient = MiniKube.getKubernetesClient lazy val kyuubiPod: Pod = miniKubernetesClient.pods().withName("kyuubi-test").get() lazy val kyuubiServerIp: String = kyuubiPod.getStatus.getPodIP lazy val 
miniKubeIp: String = MiniKube.getIp diff --git a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/deployment/KyuubiOnKubernetesTestsSuite.scala b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/deployment/KyuubiOnKubernetesTestsSuite.scala index bc7c98a80c7..73cb5620a51 100644 --- a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/deployment/KyuubiOnKubernetesTestsSuite.scala +++ b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/deployment/KyuubiOnKubernetesTestsSuite.scala @@ -54,7 +54,7 @@ class KyuubiOnKubernetesWithSparkTestsBase extends WithKyuubiServerOnKubernetes super.connectionConf ++ Map( "spark.master" -> s"k8s://$miniKubeApiMaster", - "spark.kubernetes.container.image" -> "apache/spark:v3.3.2", + "spark.kubernetes.container.image" -> "apache/spark:3.4.1", "spark.executor.memory" -> "512M", "spark.driver.memory" -> "1024M", "spark.kubernetes.driver.request.cores" -> "250m", diff --git a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala index 14db8b408ba..3f591e604dc 100644 --- a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala +++ b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala @@ -19,7 +19,6 @@ package org.apache.kyuubi.kubernetes.test.spark import java.util.UUID -import scala.collection.JavaConverters._ import scala.concurrent.duration._ import org.apache.hadoop.conf.Configuration @@ -29,12 +28,12 @@ import org.apache.kyuubi._ import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_HOST -import org.apache.kyuubi.engine.{ApplicationInfo, ApplicationOperation, KubernetesApplicationOperation} +import org.apache.kyuubi.engine.{ApplicationInfo, ApplicationManagerInfo, ApplicationOperation, KubernetesApplicationOperation} import org.apache.kyuubi.engine.ApplicationState.{FAILED, NOT_FOUND, RUNNING} import org.apache.kyuubi.engine.spark.SparkProcessBuilder import org.apache.kyuubi.kubernetes.test.MiniKube import org.apache.kyuubi.operation.SparkQueryTests -import org.apache.kyuubi.session.{KyuubiBatchSessionImpl, KyuubiSessionManager} +import org.apache.kyuubi.session.KyuubiSessionManager import org.apache.kyuubi.util.Validator.KUBERNETES_EXECUTOR_POD_NAME_PREFIX import org.apache.kyuubi.zookeeper.ZookeeperConf.ZK_CLIENT_PORT_ADDRESS @@ -44,11 +43,14 @@ abstract class SparkOnKubernetesSuiteBase MiniKube.getKubernetesClient.getMasterUrl.toString } + protected val appMgrInfo = + ApplicationManagerInfo(Some(s"k8s://$apiServerAddress"), Some("minikube"), None) + protected def sparkOnK8sConf: KyuubiConf = { // TODO Support more Spark version // Spark official docker image: https://hub.docker.com/r/apache/spark/tags KyuubiConf().set("spark.master", s"k8s://$apiServerAddress") - .set("spark.kubernetes.container.image", "apache/spark:v3.3.2") + .set("spark.kubernetes.container.image", "apache/spark:3.4.1") .set("spark.kubernetes.container.image.pullPolicy", "IfNotPresent") .set("spark.executor.instances", "1") .set("spark.executor.memory", "512M") @@ -57,6 +59,7 @@ 
abstract class SparkOnKubernetesSuiteBase .set("spark.kubernetes.executor.request.cores", "250m") .set("kyuubi.kubernetes.context", "minikube") .set("kyuubi.frontend.protocols", "THRIFT_BINARY,REST") + .set("kyuubi.session.engine.initialize.timeout", "PT10M") } } @@ -125,6 +128,7 @@ class SparkClusterModeOnKubernetesSuite override protected def jdbcUrl: String = getJdbcUrl } +// [KYUUBI #4467] KubernetesApplicationOperator doesn't support client mode class KyuubiOperationKubernetesClusterClientModeSuite extends SparkClientModeOnKubernetesSuiteBase { private lazy val k8sOperation: KubernetesApplicationOperation = { @@ -136,7 +140,7 @@ class KyuubiOperationKubernetesClusterClientModeSuite private def sessionManager: KyuubiSessionManager = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] - test("Spark Client Mode On Kubernetes Kyuubi KubernetesApplicationOperation Suite") { + ignore("Spark Client Mode On Kubernetes Kyuubi KubernetesApplicationOperation Suite") { val batchRequest = newSparkBatchRequest(conf.getAll ++ Map( KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)) @@ -144,24 +148,31 @@ class KyuubiOperationKubernetesClusterClientModeSuite "kyuubi", "passwd", "localhost", - batchRequest.getConf.asScala.toMap, batchRequest) eventually(timeout(3.minutes), interval(50.milliseconds)) { - val state = k8sOperation.getApplicationInfoByTag(sessionHandle.identifier.toString) + val state = k8sOperation.getApplicationInfoByTag( + appMgrInfo, + sessionHandle.identifier.toString) assert(state.id != null) assert(state.name != null) assert(state.state == RUNNING) } - val killResponse = k8sOperation.killApplicationByTag(sessionHandle.identifier.toString) + val killResponse = k8sOperation.killApplicationByTag( + appMgrInfo, + sessionHandle.identifier.toString) assert(killResponse._1) assert(killResponse._2 startsWith "Succeeded to terminate:") - val appInfo = k8sOperation.getApplicationInfoByTag(sessionHandle.identifier.toString) + val appInfo = k8sOperation.getApplicationInfoByTag( + appMgrInfo, + sessionHandle.identifier.toString) assert(appInfo == ApplicationInfo(null, null, NOT_FOUND)) - val failKillResponse = k8sOperation.killApplicationByTag(sessionHandle.identifier.toString) + val failKillResponse = k8sOperation.killApplicationByTag( + appMgrInfo, + sessionHandle.identifier.toString) assert(!failKillResponse._1) assert(failKillResponse._2 === ApplicationOperation.NOT_FOUND) } @@ -204,31 +215,37 @@ class KyuubiOperationKubernetesClusterClusterModeSuite "runner", "passwd", "localhost", - batchRequest.getConf.asScala.toMap, batchRequest) - val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSessionImpl] - val batchJobSubmissionOp = session.batchJobSubmissionOp - - eventually(timeout(3.minutes), interval(50.milliseconds)) { - val appInfo = batchJobSubmissionOp.getOrFetchCurrentApplicationInfo - assert(appInfo.nonEmpty) - assert(appInfo.exists(_.state == RUNNING)) - assert(appInfo.exists(_.name.startsWith(driverPodNamePrefix))) + // wait for driver pod start + eventually(timeout(3.minutes), interval(5.second)) { + // trigger k8sOperation init here + val appInfo = k8sOperation.getApplicationInfoByTag( + appMgrInfo, + sessionHandle.identifier.toString) + assert(appInfo.state == RUNNING) + assert(appInfo.name.startsWith(driverPodNamePrefix)) } - val killResponse = k8sOperation.killApplicationByTag(sessionHandle.identifier.toString) + val killResponse = k8sOperation.killApplicationByTag( + appMgrInfo, + sessionHandle.identifier.toString) 
assert(killResponse._1) - assert(killResponse._2 startsWith "Operation of deleted appId:") + assert(killResponse._2 endsWith "is completed") + assert(killResponse._2 contains sessionHandle.identifier.toString) eventually(timeout(3.minutes), interval(50.milliseconds)) { - val appInfo = k8sOperation.getApplicationInfoByTag(sessionHandle.identifier.toString) + val appInfo = k8sOperation.getApplicationInfoByTag( + appMgrInfo, + sessionHandle.identifier.toString) // We may kill engine start but not ready // An EOF Error occurred when the driver was starting assert(appInfo.state == FAILED || appInfo.state == NOT_FOUND) } - val failKillResponse = k8sOperation.killApplicationByTag(sessionHandle.identifier.toString) + val failKillResponse = k8sOperation.killApplicationByTag( + appMgrInfo, + sessionHandle.identifier.toString) assert(!failKillResponse._1) } } diff --git a/integration-tests/kyuubi-trino-it/pom.xml b/integration-tests/kyuubi-trino-it/pom.xml index 107d621b075..628f63818b9 100644 --- a/integration-tests/kyuubi-trino-it/pom.xml +++ b/integration-tests/kyuubi-trino-it/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-trino-it_2.12 + kyuubi-trino-it_${scala.binary.version} Kyuubi Test Trino IT https://kyuubi.apache.org/ diff --git a/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala b/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala index bd8bf3eda2c..7575bf8a9b4 100644 --- a/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala +++ b/integration-tests/kyuubi-trino-it/src/test/scala/org/apache/kyuubi/it/trino/server/TrinoFrontendSuite.scala @@ -26,18 +26,37 @@ import org.apache.kyuubi.operation.SparkMetadataTests /** * This test is for Trino jdbc driver with Kyuubi Server and Spark engine: * - * ------------------------------------------------------------- - * | JDBC | - * | Trino-driver ----> Kyuubi Server --> Spark Engine | - * | | - * ------------------------------------------------------------- + * ------------------------------------------------------------- + * | JDBC | + * | Trino-driver ----> Kyuubi Server --> Spark Engine | + * | | + * ------------------------------------------------------------- */ class TrinoFrontendSuite extends WithKyuubiServer with SparkMetadataTests { - // TODO: Add more test cases + + test("execute statement - select 11 where 1=1") { + withJdbcStatement() { statement => + val resultSet = statement.executeQuery("SELECT 11 where 1<1") + while (resultSet.next()) { + assert(resultSet.getInt(1) === 11) + } + } + } + + test("execute preparedStatement - select 11 where 1 = 1") { + withJdbcPrepareStatement("select 11 where 1 = ? 
") { statement => + statement.setInt(1, 1) + val rs = statement.executeQuery() + while (rs.next()) { + assert(rs.getInt(1) == 11) + } + } + } override protected val conf: KyuubiConf = { KyuubiConf().set(KyuubiConf.FRONTEND_PROTOCOLS, Seq("TRINO")) } + override protected def jdbcUrl: String = { s"jdbc:trino://${server.frontendServices.head.connectionUrl}/;" } @@ -47,7 +66,6 @@ class TrinoFrontendSuite extends WithKyuubiServer with SparkMetadataTests { override def beforeAll(): Unit = { super.beforeAll() - // eagerly start spark engine before running test, it's a workaround for trino jdbc driver // since it does not support changing http connect timeout try { @@ -55,7 +73,7 @@ class TrinoFrontendSuite extends WithKyuubiServer with SparkMetadataTests { statement.execute("SELECT 1") } } catch { - case NonFatal(e) => + case NonFatal(_) => } } } diff --git a/integration-tests/kyuubi-zookeeper-it/pom.xml b/integration-tests/kyuubi-zookeeper-it/pom.xml index bded1585b71..869fd40b2bb 100644 --- a/integration-tests/kyuubi-zookeeper-it/pom.xml +++ b/integration-tests/kyuubi-zookeeper-it/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi integration-tests - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-zookeeper-it_2.12 + kyuubi-zookeeper-it_${scala.binary.version} Kyuubi Test Zookeeper IT https://kyuubi.apache.org/ diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index b6a48daaedc..35d0b4f9ea7 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT integration-tests diff --git a/kyuubi-assembly/pom.xml b/kyuubi-assembly/pom.xml index 0524470a20d..4fa0d9a0fd3 100644 --- a/kyuubi-assembly/pom.xml +++ b/kyuubi-assembly/pom.xml @@ -22,11 +22,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-assembly_2.12 + kyuubi-assembly_${scala.binary.version} pom Kyuubi Project Assembly https://kyuubi.apache.org/ @@ -69,28 +69,18 @@ - org.apache.hadoop - hadoop-client-api + org.apache.kyuubi + ${kyuubi-shaded-zookeeper.artifacts} org.apache.hadoop - hadoop-client-runtime - - - - org.apache.curator - curator-framework - - - - org.apache.curator - curator-client + hadoop-client-api - org.apache.curator - curator-recipes + org.apache.hadoop + hadoop-client-runtime diff --git a/kyuubi-common/pom.xml b/kyuubi-common/pom.xml index d62761d72b3..0d5c491b51c 100644 --- a/kyuubi-common/pom.xml +++ b/kyuubi-common/pom.xml @@ -21,20 +21,20 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-common_2.12 + kyuubi-common_${scala.binary.version} jar Kyuubi Project Common https://kyuubi.apache.org/ - com.vladsch.flexmark - flexmark-all - test + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} @@ -128,6 +128,13 @@ HikariCP + + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + test-jar + + org.apache.hadoop hadoop-minikdc @@ -148,7 +155,7 @@ org.scalatestplus - mockito-4-6_${scala.binary.version} + mockito-4-11_${scala.binary.version} test @@ -164,11 +171,23 @@ test + + org.xerial + sqlite-jdbc + test + + com.jakewharton.fliptables fliptables test + + + com.vladsch.flexmark + flexmark-all + test + diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/KyuubiSQLException.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/KyuubiSQLException.scala index a9e486fb2b6..570ee6d3873 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/KyuubiSQLException.scala +++ 
b/kyuubi-common/src/main/scala/org/apache/kyuubi/KyuubiSQLException.scala @@ -26,6 +26,7 @@ import scala.collection.JavaConverters._ import org.apache.hive.service.rpc.thrift.{TStatus, TStatusCode} import org.apache.kyuubi.Utils.stringifyException +import org.apache.kyuubi.util.reflect.DynConstructors /** * @param reason a description of the exception @@ -139,9 +140,10 @@ object KyuubiSQLException { } private def newInstance(className: String, message: String, cause: Throwable): Throwable = { try { - Class.forName(className) - .getConstructor(classOf[String], classOf[Throwable]) - .newInstance(message, cause).asInstanceOf[Throwable] + DynConstructors.builder() + .impl(className, classOf[String], classOf[Throwable]) + .buildChecked[Throwable]() + .newInstance(message, cause) } catch { case _: Exception => new RuntimeException(className + ":" + message, cause) } @@ -154,7 +156,7 @@ object KyuubiSQLException { (i1, i2, i3) } - def toCause(details: Seq[String]): Throwable = { + def toCause(details: Iterable[String]): Throwable = { var ex: Throwable = null if (details != null && details.nonEmpty) { val head = details.head @@ -170,7 +172,7 @@ object KyuubiSQLException { val lineNum = line.substring(i3 + 1).toInt new StackTraceElement(clzName, methodName, fileName, lineNum) } - ex = newInstance(exClz, msg, toCause(details.slice(length + 2, details.length))) + ex = newInstance(exClz, msg, toCause(details.slice(length + 2, details.size))) ex.setStackTrace(stackTraceElements.toArray) } ex diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/Logging.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/Logging.scala index 1df598132fb..d6dcc8d345a 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/Logging.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/Logging.scala @@ -23,7 +23,7 @@ import org.apache.logging.log4j.core.config.DefaultConfiguration import org.slf4j.{Logger, LoggerFactory} import org.slf4j.bridge.SLF4JBridgeHandler -import org.apache.kyuubi.util.ClassUtils +import org.apache.kyuubi.util.reflect.ReflectUtils /** * Simple version of logging adopted from Apache Spark. 
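// Context for the ReflectUtils change in the surrounding hunks: the import swap above and the
// call-site change below replace ClassUtils.classIsLoadable with ReflectUtils.isClassLoadable.
// The sketch that follows only illustrates what such a class-loadability probe typically looks
// like; the object/method names and signature are assumptions for illustration, not Kyuubi's
// actual ReflectUtils implementation.
object ClassLoadProbeSketch {
  def isClassLoadable(
      className: String,
      cl: ClassLoader = Thread.currentThread().getContextClassLoader): Boolean = {
    try {
      // Load without initializing static state; we only care whether the class is present.
      Class.forName(className, false, cl)
      true
    } catch {
      case _: ClassNotFoundException | _: NoClassDefFoundError => false
    }
  }
}
// Usage in the same spirit as the hunk below: guard optional integrations behind a probe,
// e.g. isClassLoadable("org.slf4j.bridge.SLF4JBridgeHandler") before installing the JUL bridge.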
@@ -116,8 +116,9 @@ object Logging { // This distinguishes the log4j 1.2 binding, currently // org.slf4j.impl.Log4jLoggerFactory, from the log4j 2.0 binding, currently // org.apache.logging.slf4j.Log4jLoggerFactory - "org.slf4j.impl.Log4jLoggerFactory" - .equals(LoggerFactory.getILoggerFactory.getClass.getName) + val binderClass = LoggerFactory.getILoggerFactory.getClass.getName + "org.slf4j.impl.Log4jLoggerFactory".equals( + binderClass) || "org.slf4j.impl.Reload4jLoggerFactory".equals(binderClass) } private[kyuubi] def isLog4j2: Boolean = { @@ -148,7 +149,7 @@ object Logging { isInterpreter: Boolean, loggerName: String, logger: => Logger): Unit = { - if (ClassUtils.classIsLoadable("org.slf4j.bridge.SLF4JBridgeHandler")) { + if (ReflectUtils.isClassLoadable("org.slf4j.bridge.SLF4JBridgeHandler")) { // Handles configuring the JUL -> SLF4J bridge SLF4JBridgeHandler.removeHandlersForRootLogger() SLF4JBridgeHandler.install() diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/Utils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/Utils.scala index 7ab312fa1da..accfca4c98f 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/Utils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/Utils.scala @@ -21,9 +21,12 @@ import java.io._ import java.net.{Inet4Address, InetAddress, NetworkInterface} import java.nio.charset.StandardCharsets import java.nio.file.{Files, Path, Paths, StandardCopyOption} +import java.security.PrivilegedAction import java.text.SimpleDateFormat import java.util.{Date, Properties, TimeZone, UUID} +import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.locks.Lock import scala.collection.JavaConverters._ import scala.sys.process._ @@ -201,6 +204,14 @@ object Utils extends Logging { def currentUser: String = UserGroupInformation.getCurrentUser.getShortUserName + def doAs[T]( + proxyUser: String, + realUser: UserGroupInformation = UserGroupInformation.getCurrentUser)(f: () => T): T = { + UserGroupInformation.createProxyUser(proxyUser, realUser).doAs(new PrivilegedAction[T] { + override def run(): T = f() + }) + } + private val shortVersionRegex = """^(\d+\.\d+\.\d+)(.*)?$""".r /** @@ -221,6 +232,11 @@ object Utils extends Logging { */ val isWindows: Boolean = SystemUtils.IS_OS_WINDOWS + /** + * Whether the underlying operating system is MacOS. + */ + val isMac: Boolean = SystemUtils.IS_OS_MAC + /** * Indicates whether Kyuubi is currently running unit tests. */ @@ -387,4 +403,50 @@ object Utils extends Logging { Option(Thread.currentThread().getContextClassLoader).getOrElse(getKyuubiClassLoader) def isOnK8s: Boolean = Files.exists(Paths.get("/var/run/secrets/kubernetes.io")) + + /** + * Return a nice string representation of the exception. It will call "printStackTrace" to + * recursively generate the stack trace including the exception and its causes. + */ + def prettyPrint(e: Throwable): String = { + if (e == null) { + "" + } else { + // Use e.printStackTrace here because e.getStackTrace doesn't include the cause + val stringWriter = new StringWriter() + e.printStackTrace(new PrintWriter(stringWriter)) + stringWriter.toString + } + } + + def withLockRequired[T](lock: Lock)(block: => T): T = { + try { + lock.lock() + block + } finally { + lock.unlock() + } + } + + /** + * Try killing the process gracefully first, then forcibly if process does not exit in + * graceful period. 
+ * + * @param process the being killed process + * @param gracefulPeriod the graceful killing period, in milliseconds + * @return the exit code if process exit normally, None if the process finally was killed + * forcibly + */ + def terminateProcess(process: java.lang.Process, gracefulPeriod: Long): Option[Int] = { + process.destroy() + if (process.waitFor(gracefulPeriod, TimeUnit.MILLISECONDS)) { + Some(process.exitValue()) + } else { + warn(s"Process does not exit after $gracefulPeriod ms, try to forcibly kill. " + + "Staging files generated by the process may be retained!") + process.destroyForcibly() + None + } + } + } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigBuilder.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigBuilder.scala index 62f060a052d..d6de402416d 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigBuilder.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigBuilder.scala @@ -18,11 +18,14 @@ package org.apache.kyuubi.config import java.time.Duration +import java.util.Locale import java.util.regex.PatternSyntaxException import scala.util.{Failure, Success, Try} import scala.util.matching.Regex +import org.apache.kyuubi.util.EnumUtils._ + private[kyuubi] case class ConfigBuilder(key: String) { private[config] var _doc = "" @@ -150,7 +153,7 @@ private[kyuubi] case class ConfigBuilder(key: String) { } } - new TypedConfigBuilder(this, regexFromString(_, this.key), _.toString) + TypedConfigBuilder(this, regexFromString(_, this.key), _.toString) } } @@ -166,6 +169,21 @@ private[kyuubi] case class TypedConfigBuilder[T]( def transform(fn: T => T): TypedConfigBuilder[T] = this.copy(fromStr = s => fn(fromStr(s))) + def transformToUpperCase: TypedConfigBuilder[T] = { + transformString(_.toUpperCase(Locale.ROOT)) + } + + def transformToLowerCase: TypedConfigBuilder[T] = { + transformString(_.toLowerCase(Locale.ROOT)) + } + + private def transformString(fn: String => String): TypedConfigBuilder[T] = { + require(parent._type == "string") + this.asInstanceOf[TypedConfigBuilder[String]] + .transform(fn) + .asInstanceOf[TypedConfigBuilder[T]] + } + /** Checks if the user-provided value for the config matches the validator. */ def checkValue(validator: T => Boolean, errMsg: String): TypedConfigBuilder[T] = { transform { v => @@ -187,10 +205,35 @@ private[kyuubi] case class TypedConfigBuilder[T]( } } + /** Checks if the user-provided value for the config matches the value set of the enumeration. */ + def checkValues(enumeration: Enumeration): TypedConfigBuilder[T] = { + transform { v => + val isValid = v match { + case iter: Iterable[Any] => isValidEnums(enumeration, iter) + case name => isValidEnum(enumeration, name) + } + if (!isValid) { + val actualValueStr = v match { + case iter: Iterable[Any] => iter.mkString(",") + case value => value.toString + } + throw new IllegalArgumentException( + s"The value of ${parent.key} should be one of ${enumeration.values.mkString(", ")}," + + s" but was $actualValueStr") + } + v + } + } + /** Turns the config entry into a sequence of values of the underlying type. 
*/ def toSequence(sp: String = ","): TypedConfigBuilder[Seq[T]] = { parent._type = "seq" - TypedConfigBuilder(parent, strToSeq(_, fromStr, sp), seqToStr(_, toStr)) + TypedConfigBuilder(parent, strToSeq(_, fromStr, sp), iterableToStr(_, toStr)) + } + + def toSet(sp: String = ",", skipBlank: Boolean = true): TypedConfigBuilder[Set[T]] = { + parent._type = "set" + TypedConfigBuilder(parent, strToSet(_, fromStr, sp, skipBlank), iterableToStr(_, toStr)) } def createOptional: OptionalConfigEntry[T] = { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigHelpers.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigHelpers.scala index 225f1b53726..525ea2ff4af 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigHelpers.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/ConfigHelpers.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.config +import org.apache.commons.lang3.StringUtils + import org.apache.kyuubi.Utils object ConfigHelpers { @@ -25,7 +27,11 @@ object ConfigHelpers { Utils.strToSeq(str, sp).map(converter) } - def seqToStr[T](v: Seq[T], stringConverter: T => String): String = { - v.map(stringConverter).mkString(",") + def strToSet[T](str: String, converter: String => T, sp: String, skipBlank: Boolean): Set[T] = { + Utils.strToSeq(str, sp).filter(!skipBlank || StringUtils.isNotBlank(_)).map(converter).toSet + } + + def iterableToStr[T](v: Iterable[T], stringConverter: T => String, sp: String = ","): String = { + v.map(stringConverter).mkString(sp) } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala index f61cfeaa756..50006b95ea1 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala @@ -42,7 +42,7 @@ case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging { } if (loadSysDefault) { - val fromSysDefaults = Utils.getSystemProperties.filterKeys(_.startsWith("kyuubi.")) + val fromSysDefaults = Utils.getSystemProperties.filterKeys(_.startsWith("kyuubi.")).toMap loadFromMap(fromSysDefaults) } @@ -103,7 +103,6 @@ case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging { /** unset a parameter from the configuration */ def unset(key: String): KyuubiConf = { - logDeprecationWarning(key) settings.remove(key) this } @@ -135,6 +134,31 @@ case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging { getAllWithPrefix(s"$KYUUBI_BATCH_CONF_PREFIX.$normalizedBatchType", "") } + /** Get the kubernetes conf for specified kubernetes context and namespace. 
*/ + def getKubernetesConf(context: Option[String], namespace: Option[String]): KyuubiConf = { + val conf = this.clone + context.foreach { c => + val contextConf = + getAllWithPrefix(s"$KYUUBI_KUBERNETES_CONF_PREFIX.$c", "").map { case (suffix, value) => + s"$KYUUBI_KUBERNETES_CONF_PREFIX.$suffix" -> value + } + val contextNamespaceConf = namespace.map { ns => + getAllWithPrefix(s"$KYUUBI_KUBERNETES_CONF_PREFIX.$c.$ns", "").map { + case (suffix, value) => + s"$KYUUBI_KUBERNETES_CONF_PREFIX.$suffix" -> value + } + }.getOrElse(Map.empty) + + (contextConf ++ contextNamespaceConf).map { case (key, value) => + conf.set(key, value) + } + conf.set(KUBERNETES_CONTEXT, c) + namespace.foreach(ns => conf.set(KUBERNETES_NAMESPACE, ns)) + conf + } + conf + } + /** * Retrieve key-value pairs from [[KyuubiConf]] starting with `dropped.remainder`, and put them to * the result map with the `dropped` of key being dropped. @@ -189,6 +213,8 @@ case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging { s"and may be removed in the future. $comment") } } + + def isRESTEnabled: Boolean = get(FRONTEND_PROTOCOLS).contains(FrontendProtocols.REST.toString) } /** @@ -206,6 +232,7 @@ object KyuubiConf { final val KYUUBI_HOME = "KYUUBI_HOME" final val KYUUBI_ENGINE_ENV_PREFIX = "kyuubi.engineEnv" final val KYUUBI_BATCH_CONF_PREFIX = "kyuubi.batchConf" + final val KYUUBI_KUBERNETES_CONF_PREFIX = "kyuubi.kubernetes" final val USER_DEFAULTS_CONF_QUOTE = "___" private[this] val kyuubiConfEntriesUpdateLock = new Object @@ -386,12 +413,12 @@ object KyuubiConf { "
    ") .version("1.4.0") .stringConf + .transformToUpperCase .toSequence() - .transform(_.map(_.toUpperCase(Locale.ROOT))) - .checkValue( - _.forall(FrontendProtocols.values.map(_.toString).contains), - s"the frontend protocol should be one or more of ${FrontendProtocols.values.mkString(",")}") - .createWithDefault(Seq(FrontendProtocols.THRIFT_BINARY.toString)) + .checkValues(FrontendProtocols) + .createWithDefault(Seq( + FrontendProtocols.THRIFT_BINARY.toString, + FrontendProtocols.REST.toString)) val FRONTEND_BIND_HOST: OptionalConfigEntry[String] = buildConf("kyuubi.frontend.bind.host") .doc("Hostname or IP of the machine on which to run the frontend services.") @@ -400,6 +427,16 @@ object KyuubiConf { .stringConf .createOptional + val FRONTEND_ADVERTISED_HOST: OptionalConfigEntry[String] = + buildConf("kyuubi.frontend.advertised.host") + .doc("Hostname or IP of the Kyuubi server's frontend services to publish to " + + "external systems such as the service discovery ensemble and metadata store. " + + "Use it when you want to advertise a different hostname or IP than the bind host.") + .version("1.8.0") + .serverOnly + .stringConf + .createOptional + val FRONTEND_THRIFT_BINARY_BIND_HOST: ConfigEntry[Option[String]] = buildConf("kyuubi.frontend.thrift.binary.bind.host") .doc("Hostname or IP of the machine on which to run the thrift frontend service " + @@ -444,13 +481,13 @@ object KyuubiConf { .stringConf .createOptional - val FRONTEND_THRIFT_BINARY_SSL_DISALLOWED_PROTOCOLS: ConfigEntry[Seq[String]] = + val FRONTEND_THRIFT_BINARY_SSL_DISALLOWED_PROTOCOLS: ConfigEntry[Set[String]] = buildConf("kyuubi.frontend.thrift.binary.ssl.disallowed.protocols") .doc("SSL versions to disallow for Kyuubi thrift binary frontend.") .version("1.7.0") .stringConf - .toSequence() - .createWithDefault(Seq("SSLv2", "SSLv3")) + .toSet() + .createWithDefault(Set("SSLv2", "SSLv3")) val FRONTEND_THRIFT_BINARY_SSL_INCLUDE_CIPHER_SUITES: ConfigEntry[Seq[String]] = buildConf("kyuubi.frontend.thrift.binary.ssl.include.ciphersuites") @@ -726,7 +763,7 @@ object KyuubiConf { .stringConf .createWithDefault("X-Real-IP") - val AUTHENTICATION_METHOD: ConfigEntry[Seq[String]] = buildConf("kyuubi.authentication") + val AUTHENTICATION_METHOD: ConfigEntry[Set[String]] = buildConf("kyuubi.authentication") .doc("A comma-separated list of client authentication types." + "
      " + "
    • NOSASL: raw transport.
    • " + @@ -761,12 +798,10 @@ object KyuubiConf { .version("1.0.0") .serverOnly .stringConf - .toSequence() - .transform(_.map(_.toUpperCase(Locale.ROOT))) - .checkValue( - _.forall(AuthTypes.values.map(_.toString).contains), - s"the authentication type should be one or more of ${AuthTypes.values.mkString(",")}") - .createWithDefault(Seq(AuthTypes.NONE.toString)) + .transformToUpperCase + .toSet() + .checkValues(AuthTypes) + .createWithDefault(Set(AuthTypes.NONE.toString)) val AUTHENTICATION_CUSTOM_CLASS: OptionalConfigEntry[String] = buildConf("kyuubi.authentication.custom.class") @@ -822,25 +857,25 @@ object KyuubiConf { .stringConf .createOptional - val AUTHENTICATION_LDAP_GROUP_FILTER: ConfigEntry[Seq[String]] = + val AUTHENTICATION_LDAP_GROUP_FILTER: ConfigEntry[Set[String]] = buildConf("kyuubi.authentication.ldap.groupFilter") .doc("COMMA-separated list of LDAP Group names (short name not full DNs). " + "For example: HiveAdmins,HadoopAdmins,Administrators") .version("1.7.0") .serverOnly .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) - val AUTHENTICATION_LDAP_USER_FILTER: ConfigEntry[Seq[String]] = + val AUTHENTICATION_LDAP_USER_FILTER: ConfigEntry[Set[String]] = buildConf("kyuubi.authentication.ldap.userFilter") .doc("COMMA-separated list of LDAP usernames (just short names, not full DNs). " + "For example: hiveuser,impalauser,hiveadmin,hadoopadmin") .version("1.7.0") .serverOnly .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val AUTHENTICATION_LDAP_GUID_KEY: ConfigEntry[String] = buildConf("kyuubi.authentication.ldap.guidKey") @@ -997,8 +1032,8 @@ object KyuubiConf { .version("1.0.0") .serverOnly .stringConf - .checkValues(SaslQOP.values.map(_.toString)) - .transform(_.toLowerCase(Locale.ROOT)) + .checkValues(SaslQOP) + .transformToLowerCase .createWithDefault(SaslQOP.AUTH.toString) val FRONTEND_REST_BIND_HOST: ConfigEntry[Option[String]] = @@ -1103,6 +1138,15 @@ object KyuubiConf { .stringConf .createOptional + val KUBERNETES_CONTEXT_ALLOW_LIST: ConfigEntry[Set[String]] = + buildConf("kyuubi.kubernetes.context.allow.list") + .doc("The allowed kubernetes context list, if it is empty," + + " there is no kubernetes context limitation.") + .version("1.8.0") + .stringConf + .toSet() + .createWithDefault(Set.empty) + val KUBERNETES_NAMESPACE: ConfigEntry[String] = buildConf("kyuubi.kubernetes.namespace") .doc("The namespace that will be used for running the kyuubi pods and find engines.") @@ -1110,6 +1154,15 @@ object KyuubiConf { .stringConf .createWithDefault("default") + val KUBERNETES_NAMESPACE_ALLOW_LIST: ConfigEntry[Set[String]] = + buildConf("kyuubi.kubernetes.namespace.allow.list") + .doc("The allowed kubernetes namespace list, if it is empty," + + " there is no kubernetes namespace limitation.") + .version("1.8.0") + .stringConf + .toSet() + .createWithDefault(Set.empty) + val KUBERNETES_MASTER: OptionalConfigEntry[String] = buildConf("kyuubi.kubernetes.master.address") .doc("The internal Kubernetes master (API server) address to be used for kyuubi.") @@ -1169,6 +1222,15 @@ object KyuubiConf { .booleanConf .createWithDefault(false) + val KUBERNETES_TERMINATED_APPLICATION_RETAIN_PERIOD: ConfigEntry[Long] = + buildConf("kyuubi.kubernetes.terminatedApplicationRetainPeriod") + .doc("The period for which the Kyuubi server retains application information after " + + "the application terminates.") + .version("1.7.1") + .timeConf + .checkValue(_ > 0, "must be positive number") + 
.createWithDefault(Duration.ofMinutes(5).toMillis) + // /////////////////////////////////////////////////////////////////////////////////////////////// // SQL Engine Configuration // // /////////////////////////////////////////////////////////////////////////////////////////////// @@ -1226,6 +1288,16 @@ object KyuubiConf { .timeConf .createWithDefault(0) + val ENGINE_SPARK_MAX_INITIAL_WAIT: ConfigEntry[Long] = + buildConf("kyuubi.session.engine.spark.max.initial.wait") + .doc("Max wait time for the initial connection to Spark engine. The engine will" + + " self-terminate no new incoming connection is established within this time." + + " This setting only applies at the CONNECTION share level." + + " 0 or negative means not to self-terminate.") + .version("1.8.0") + .timeConf + .createWithDefault(Duration.ofSeconds(60).toMillis) + val ENGINE_FLINK_MAIN_RESOURCE: OptionalConfigEntry[String] = buildConf("kyuubi.session.engine.flink.main.resource") .doc("The package used to create Flink SQL engine remote job. If it is undefined," + @@ -1243,6 +1315,15 @@ object KyuubiConf { .intConf .createWithDefault(1000000) + val ENGINE_FLINK_FETCH_TIMEOUT: OptionalConfigEntry[Long] = + buildConf("kyuubi.session.engine.flink.fetch.timeout") + .doc("Result fetch timeout for Flink engine. If the timeout is reached, the result " + + "fetch would be stopped and the current fetched would be returned. If no data are " + + "fetched, a TimeoutException would be thrown.") + .version("1.8.0") + .timeConf + .createOptional + val ENGINE_TRINO_MAIN_RESOURCE: OptionalConfigEntry[String] = buildConf("kyuubi.session.engine.trino.main.resource") .doc("The package used to create Trino engine remote job. If it is undefined," + @@ -1265,6 +1346,55 @@ object KyuubiConf { .stringConf .createOptional + val ENGINE_TRINO_CONNECTION_PASSWORD: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.password") + .doc("The password used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_KEYSTORE_PATH: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.keystore.path") + .doc("The keystore path used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_KEYSTORE_PASSWORD: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.keystore.password") + .doc("The keystore password used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_KEYSTORE_TYPE: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.keystore.type") + .doc("The keystore type used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_TRUSTSTORE_PATH: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.truststore.path") + .doc("The truststore path used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_TRUSTSTORE_PASSWORD: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.truststore.password") + .doc("The truststore password used for connecting to trino cluster") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_TRINO_CONNECTION_TRUSTSTORE_TYPE: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.trino.connection.truststore.type") + .doc("The truststore type used for connecting to trino cluster") + 
.version("1.8.0") + .stringConf + .createOptional + val ENGINE_TRINO_SHOW_PROGRESS: ConfigEntry[Boolean] = buildConf("kyuubi.session.engine.trino.showProgress") .doc("When true, show the progress bar and final info in the Trino engine log.") @@ -1293,6 +1423,14 @@ object KyuubiConf { .timeConf .createWithDefault(Duration.ofSeconds(15).toMillis) + val ENGINE_ALIVE_MAX_FAILURES: ConfigEntry[Int] = + buildConf("kyuubi.session.engine.alive.max.failures") + .doc("The maximum number of failures allowed for the engine.") + .version("1.8.0") + .intConf + .checkValue(_ > 0, "Must be positive") + .createWithDefault(3) + val ENGINE_ALIVE_PROBE_ENABLED: ConfigEntry[Boolean] = buildConf("kyuubi.session.engine.alive.probe.enabled") .doc("Whether to enable the engine alive probe, it true, we will create a companion thrift" + @@ -1356,6 +1494,14 @@ object KyuubiConf { .version("1.2.0") .fallbackConf(SESSION_TIMEOUT) + val SESSION_CLOSE_ON_DISCONNECT: ConfigEntry[Boolean] = + buildConf("kyuubi.session.close.on.disconnect") + .doc("Session will be closed when client disconnects from kyuubi gateway. " + + "Set this to false to have session outlive its parent connection.") + .version("1.8.0") + .booleanConf + .createWithDefault(true) + val BATCH_SESSION_IDLE_TIMEOUT: ConfigEntry[Long] = buildConf("kyuubi.batch.session.idle.timeout") .doc("Batch session idle timeout, it will be closed when it's not accessed for this duration") .version("1.6.2") @@ -1375,7 +1521,7 @@ object KyuubiConf { .timeConf .createWithDefault(Duration.ofMinutes(30L).toMillis) - val SESSION_CONF_IGNORE_LIST: ConfigEntry[Seq[String]] = + val SESSION_CONF_IGNORE_LIST: ConfigEntry[Set[String]] = buildConf("kyuubi.session.conf.ignore.list") .doc("A comma-separated list of ignored keys. If the client connection contains any of" + " them, the key and the corresponding value will be removed silently during engine" + @@ -1385,10 +1531,10 @@ object KyuubiConf { " configurations via SET syntax.") .version("1.2.0") .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) - val SESSION_CONF_RESTRICT_LIST: ConfigEntry[Seq[String]] = + val SESSION_CONF_RESTRICT_LIST: ConfigEntry[Set[String]] = buildConf("kyuubi.session.conf.restrict.list") .doc("A comma-separated list of restricted keys. If the client connection contains any of" + " them, the connection will be rejected explicitly during engine bootstrap and connection" + @@ -1398,8 +1544,8 @@ object KyuubiConf { " configurations via SET syntax.") .version("1.2.0") .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val SESSION_USER_SIGN_ENABLED: ConfigEntry[Boolean] = buildConf("kyuubi.session.user.sign.enabled") @@ -1429,6 +1575,15 @@ object KyuubiConf { .booleanConf .createWithDefault(true) + val SESSION_ENGINE_STARTUP_DESTROY_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.session.engine.startup.destroy.timeout") + .doc("Engine startup process destroy wait time, if the process does not " + + "stop after this time, force destroy instead. This configuration only " + + s"takes effect when `${SESSION_ENGINE_STARTUP_WAIT_COMPLETION.key}=false`.") + .version("1.8.0") + .timeConf + .createWithDefault(Duration.ofSeconds(5).toMillis) + val SESSION_ENGINE_LAUNCH_ASYNC: ConfigEntry[Boolean] = buildConf("kyuubi.session.engine.launch.async") .doc("When opening kyuubi session, whether to launch the backend engine asynchronously." 
+ @@ -1438,7 +1593,7 @@ object KyuubiConf { .booleanConf .createWithDefault(true) - val SESSION_LOCAL_DIR_ALLOW_LIST: ConfigEntry[Seq[String]] = + val SESSION_LOCAL_DIR_ALLOW_LIST: ConfigEntry[Set[String]] = buildConf("kyuubi.session.local.dir.allow.list") .doc("The local dir list that are allowed to access by the kyuubi session application. " + " End-users might set some parameters such as `spark.files` and it will " + @@ -1451,8 +1606,8 @@ object KyuubiConf { .stringConf .checkValue(dir => dir.startsWith(File.separator), "the dir should be absolute path") .transform(dir => dir.stripSuffix(File.separator) + File.separator) - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val BATCH_APPLICATION_CHECK_INTERVAL: ConfigEntry[Long] = buildConf("kyuubi.batch.application.check.interval") @@ -1468,7 +1623,7 @@ object KyuubiConf { .timeConf .createWithDefault(Duration.ofMinutes(3).toMillis) - val BATCH_CONF_IGNORE_LIST: ConfigEntry[Seq[String]] = + val BATCH_CONF_IGNORE_LIST: ConfigEntry[Set[String]] = buildConf("kyuubi.batch.conf.ignore.list") .doc("A comma-separated list of ignored keys for batch conf. If the batch conf contains" + " any of them, the key and the corresponding value will be removed silently during batch" + @@ -1480,8 +1635,8 @@ object KyuubiConf { " for the Spark batch job with key `kyuubi.batchConf.spark.spark.master`.") .version("1.6.0") .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val BATCH_INTERNAL_REST_CLIENT_SOCKET_TIMEOUT: ConfigEntry[Long] = buildConf("kyuubi.batch.internal.rest.client.socket.timeout") @@ -1511,6 +1666,50 @@ object KyuubiConf { .timeConf .createWithDefault(Duration.ofSeconds(5).toMillis) + val BATCH_RESOURCE_UPLOAD_ENABLED: ConfigEntry[Boolean] = + buildConf("kyuubi.batch.resource.upload.enabled") + .internal + .doc("Whether to enable Kyuubi batch resource upload function.") + .version("1.7.1") + .booleanConf + .createWithDefault(true) + + val BATCH_SUBMITTER_ENABLED: ConfigEntry[Boolean] = + buildConf("kyuubi.batch.submitter.enabled") + .internal + .serverOnly + .doc("Batch API v2 requires batch submitter to pick the INITIALIZED batch job " + + "from metastore and submits it to Resource Manager. " + + "Note: Batch API v2 is experimental and under rapid development, this configuration " + + "is added to allow explorers conveniently testing the developing Batch v2 API, not " + + "intended exposing to end users, it may be removed in anytime.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val BATCH_SUBMITTER_THREADS: ConfigEntry[Int] = + buildConf("kyuubi.batch.submitter.threads") + .internal + .serverOnly + .doc("Number of threads in batch job submitter, this configuration only take effects " + + s"when ${BATCH_SUBMITTER_ENABLED.key} is enabled") + .version("1.8.0") + .intConf + .createWithDefault(16) + + val BATCH_IMPL_VERSION: ConfigEntry[String] = + buildConf("kyuubi.batch.impl.version") + .internal + .serverOnly + .doc("Batch API version, candidates: 1, 2. Only take effect when " + + s"${BATCH_SUBMITTER_ENABLED.key} is true, otherwise always use v1 implementation. 
" + + "Note: Batch API v2 is experimental and under rapid development, this configuration " + + "is added to allow explorers conveniently testing the developing Batch v2 API, not " + + "intended exposing to end users, it may be removed in anytime.") + .version("1.8.0") + .stringConf + .createWithDefault("1") + val SERVER_EXEC_POOL_SIZE: ConfigEntry[Int] = buildConf("kyuubi.backend.server.exec.pool.size") .doc("Number of threads in the operation execution thread pool of Kyuubi server") @@ -1705,7 +1904,7 @@ object KyuubiConf { .version("1.7.0") .stringConf .checkValues(Set("arrow", "thrift")) - .transform(_.toLowerCase(Locale.ROOT)) + .transformToLowerCase .createWithDefault("thrift") val ARROW_BASED_ROWSET_TIMESTAMP_AS_STRING: ConfigEntry[Boolean] = @@ -1730,8 +1929,8 @@ object KyuubiConf { .doc(s"(deprecated) - Using kyuubi.engine.share.level instead") .version("1.0.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) - .checkValues(ShareLevel.values.map(_.toString)) + .transformToUpperCase + .checkValues(ShareLevel) .createWithDefault(ShareLevel.USER.toString) // [ZooKeeper Data Model] @@ -1745,7 +1944,7 @@ object KyuubiConf { .doc("(deprecated) - Using kyuubi.engine.share.level.subdomain instead") .version("1.2.0") .stringConf - .transform(_.toLowerCase(Locale.ROOT)) + .transformToLowerCase .checkValue(validZookeeperSubPath.matcher(_).matches(), "must be valid zookeeper sub path.") .createOptional @@ -1807,11 +2006,12 @@ object KyuubiConf { " all the capacity of the Hive Server2." + "
    • JDBC: specifying this engine type will launch a JDBC engine which can provide a MySQL protocol connector; for now, only the Doris dialect is supported.
    • CHAT: specifying this engine type will launch a Chat engine (see the sketch after this list).
    ") .version("1.4.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) - .checkValues(EngineType.values.map(_.toString)) + .transformToUpperCase + .checkValues(EngineType) .createWithDefault(EngineType.SPARK_SQL.toString) val ENGINE_POOL_IGNORE_SUBDOMAIN: ConfigEntry[Boolean] = @@ -1834,6 +2034,7 @@ object KyuubiConf { .doc("This parameter is introduced as a server-side parameter " + "controlling the upper limit of the engine pool.") .version("1.4.0") + .serverOnly .intConf .checkValue(s => s > 0 && s < 33, "Invalid engine pool threshold, it should be in [1, 32]") .createWithDefault(9) @@ -1856,7 +2057,7 @@ object KyuubiConf { "
") .version("1.7.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .checkValues(Set("RANDOM", "POLLING")) .createWithDefault("RANDOM") @@ -1880,24 +2081,24 @@ object KyuubiConf { .toSequence(";") .createWithDefault(Nil) - val ENGINE_DEREGISTER_EXCEPTION_CLASSES: ConfigEntry[Seq[String]] = + val ENGINE_DEREGISTER_EXCEPTION_CLASSES: ConfigEntry[Set[String]] = buildConf("kyuubi.engine.deregister.exception.classes") .doc("A comma-separated list of exception classes. If there is any exception thrown," + " whose class matches the specified classes, the engine would deregister itself.") .version("1.2.0") .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) - val ENGINE_DEREGISTER_EXCEPTION_MESSAGES: ConfigEntry[Seq[String]] = + val ENGINE_DEREGISTER_EXCEPTION_MESSAGES: ConfigEntry[Set[String]] = buildConf("kyuubi.engine.deregister.exception.messages") .doc("A comma-separated list of exception messages. If there is any exception thrown," + " whose message or stacktrace matches the specified message list, the engine would" + " deregister itself.") .version("1.2.0") .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val ENGINE_DEREGISTER_JOB_MAX_FAILURES: ConfigEntry[Int] = buildConf("kyuubi.engine.deregister.job.max.failures") @@ -1979,12 +2180,34 @@ object KyuubiConf { .stringConf .createWithDefault("file:///tmp/kyuubi/events") + val SERVER_EVENT_KAFKA_TOPIC: OptionalConfigEntry[String] = + buildConf("kyuubi.backend.server.event.kafka.topic") + .doc("The topic of server events go for the built-in Kafka logger") + .version("1.8.0") + .serverOnly + .stringConf + .createOptional + + val SERVER_EVENT_KAFKA_CLOSE_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.backend.server.event.kafka.close.timeout") + .doc("Period to wait for Kafka producer of server event handlers to close.") + .version("1.8.0") + .serverOnly + .timeConf + .createWithDefault(Duration.ofMillis(5000).toMillis) + val SERVER_EVENT_LOGGERS: ConfigEntry[Seq[String]] = buildConf("kyuubi.backend.server.event.loggers") .doc("A comma-separated list of server history loggers, where session/operation etc" + " events go.
    " + s"
  • JSON: the events will be written to the location of" + s" ${SERVER_EVENT_JSON_LOG_PATH.key}
  • " + + s"
  • KAFKA: the events will be serialized in JSON format" + + s" and sent to topic of `${SERVER_EVENT_KAFKA_TOPIC.key}`." + + s" Note: For the configs of Kafka producer," + + s" please specify them with the prefix: `kyuubi.backend.server.event.kafka.`." + + s" For example, `kyuubi.backend.server.event.kafka.bootstrap.servers=127.0.0.1:9092`" + + s"
  • " + s"
  • JDBC: to be done
  • " + s"
  • CUSTOM: User-defined event handlers.
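A sketch of wiring the KAFKA server event logger described above; the topic name and bootstrap servers are illustrative values, not defaults:

    import org.apache.kyuubi.config.KyuubiConf
    import org.apache.kyuubi.config.KyuubiConf.{SERVER_EVENT_KAFKA_TOPIC, SERVER_EVENT_LOGGERS}

    val conf = KyuubiConf()
      .set(SERVER_EVENT_LOGGERS, Seq("KAFKA"))
      .set(SERVER_EVENT_KAFKA_TOPIC.key, "kyuubi-server-events")
      // Kafka producer settings are passed through with the documented prefix.
      .set("kyuubi.backend.server.event.kafka.bootstrap.servers", "127.0.0.1:9092")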
" + " Note that: Kyuubi supports custom event handlers with the Java SPI." + @@ -1995,9 +2218,11 @@ object KyuubiConf { .version("1.4.0") .serverOnly .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .toSequence() - .checkValue(_.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM")), "Unsupported event loggers") + .checkValue( + _.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM", "KAFKA")), + "Unsupported event loggers") .createWithDefault(Nil) @deprecated("using kyuubi.engine.spark.event.loggers instead", "1.6.0") @@ -2017,7 +2242,7 @@ object KyuubiConf { " which has a zero-arg constructor.") .version("1.3.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .toSequence() .checkValue( _.toSet.subsetOf(Set("SPARK", "JSON", "JDBC", "CUSTOM")), @@ -2143,14 +2368,14 @@ object KyuubiConf { val OPERATION_PLAN_ONLY_MODE: ConfigEntry[String] = buildConf("kyuubi.operation.plan.only.mode") .doc("Configures the statement performed mode, The value can be 'parse', 'analyze', " + - "'optimize', 'optimize_with_stats', 'physical', 'execution', or 'none', " + + "'optimize', 'optimize_with_stats', 'physical', 'execution', 'lineage' or 'none', " + "when it is 'none', indicate to the statement will be fully executed, otherwise " + "only way without executing the query. different engines currently support different " + "modes, the Spark engine supports all modes, and the Flink engine supports 'parse', " + "'physical', and 'execution', other engines do not support planOnly currently.") .version("1.4.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .checkValue( mode => Set( @@ -2160,10 +2385,11 @@ object KyuubiConf { "OPTIMIZE_WITH_STATS", "PHYSICAL", "EXECUTION", + "LINEAGE", "NONE").contains(mode), "Invalid value for 'kyuubi.operation.plan.only.mode'. Valid values are" + "'parse', 'analyze', 'optimize', 'optimize_with_stats', 'physical', 'execution' and " + - "'none'.") + "'lineage', 'none'.") .createWithDefault(NoneMode.name) val OPERATION_PLAN_ONLY_OUT_STYLE: ConfigEntry[String] = @@ -2173,14 +2399,11 @@ object KyuubiConf { "of the Spark engine") .version("1.7.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) - .checkValue( - mode => Set("PLAIN", "JSON").contains(mode), - "Invalid value for 'kyuubi.operation.plan.only.output.style'. Valid values are " + - "'plain', 'json'.") + .transformToUpperCase + .checkValues(Set("PLAIN", "JSON")) .createWithDefault(PlainStyle.name) - val OPERATION_PLAN_ONLY_EXCLUDES: ConfigEntry[Seq[String]] = + val OPERATION_PLAN_ONLY_EXCLUDES: ConfigEntry[Set[String]] = buildConf("kyuubi.operation.plan.only.excludes") .doc("Comma-separated list of query plan names, in the form of simple class names, i.e, " + "for `SET abc=xyz`, the value will be `SetCommand`. 
For those auxiliary plans, such as " + @@ -2190,14 +2413,21 @@ object KyuubiConf { s"See also ${OPERATION_PLAN_ONLY_MODE.key}.") .version("1.5.0") .stringConf - .toSequence() - .createWithDefault(Seq( + .toSet() + .createWithDefault(Set( "ResetCommand", "SetCommand", "SetNamespaceCommand", "UseStatement", "SetCatalogAndNamespace")) + val LINEAGE_PARSER_PLUGIN_PROVIDER: ConfigEntry[String] = + buildConf("kyuubi.lineage.parser.plugin.provider") + .doc("The provider for the Spark lineage parser plugin.") + .version("1.8.0") + .stringConf + .createWithDefault("org.apache.kyuubi.plugin.lineage.LineageParserProvider") + object OperationLanguages extends Enumeration with Logging { type OperationLanguage = Value val PYTHON, SQL, SCALA, UNKNOWN = Value @@ -2224,8 +2454,8 @@ object KyuubiConf { "") .version("1.5.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) - .checkValues(OperationLanguages.values.map(_.toString)) + .transformToUpperCase + .checkValues(OperationLanguages) .createWithDefault(OperationLanguages.SQL.toString) val SESSION_CONF_ADVISOR: OptionalConfigEntry[String] = @@ -2339,14 +2569,14 @@ object KyuubiConf { val ENGINE_FLINK_MEMORY: ConfigEntry[String] = buildConf("kyuubi.engine.flink.memory") - .doc("The heap memory for the Flink SQL engine") + .doc("The heap memory for the Flink SQL engine. Only effective in yarn session mode.") .version("1.6.0") .stringConf .createWithDefault("1g") val ENGINE_FLINK_JAVA_OPTIONS: OptionalConfigEntry[String] = buildConf("kyuubi.engine.flink.java.options") - .doc("The extra Java options for the Flink SQL engine") + .doc("The extra Java options for the Flink SQL engine. Only effective in yarn session mode.") .version("1.6.0") .stringConf .createOptional @@ -2354,11 +2584,19 @@ object KyuubiConf { val ENGINE_FLINK_EXTRA_CLASSPATH: OptionalConfigEntry[String] = buildConf("kyuubi.engine.flink.extra.classpath") .doc("The extra classpath for the Flink SQL engine, for configuring the location" + - " of hadoop client jars, etc") + " of hadoop client jars, etc. Only effective in yarn session mode.") .version("1.6.0") .stringConf .createOptional + val ENGINE_FLINK_APPLICATION_JARS: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.flink.application.jars") + .doc("A comma-separated list of the local jars to be shipped with the job to the cluster. " + + "For example, SQL UDF jars. Only effective in yarn application mode.") + .version("1.8.0") + .stringConf + .createOptional + val SERVER_LIMIT_CONNECTIONS_PER_USER: OptionalConfigEntry[Int] = buildConf("kyuubi.server.limit.connections.per.user") .doc("Maximum kyuubi server connections per user." 
+ @@ -2386,14 +2624,25 @@ object KyuubiConf { .intConf .createOptional - val SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST: ConfigEntry[Seq[String]] = + val SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST: ConfigEntry[Set[String]] = buildConf("kyuubi.server.limit.connections.user.unlimited.list") .doc("The maximum connections of the user in the white list will not be limited.") .version("1.7.0") .serverOnly .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) + + val SERVER_LIMIT_CONNECTIONS_USER_DENY_LIST: ConfigEntry[Set[String]] = + buildConf("kyuubi.server.limit.connections.user.deny.list") + .doc("The user in the deny list will be denied to connect to kyuubi server, " + + "if the user has configured both user.unlimited.list and user.deny.list, " + + "the priority of the latter is higher.") + .version("1.8.0") + .serverOnly + .stringConf + .toSet() + .createWithDefault(Set.empty) val SERVER_LIMIT_BATCH_CONNECTIONS_PER_USER: OptionalConfigEntry[Int] = buildConf("kyuubi.server.limit.batch.connections.per.user") @@ -2455,15 +2704,15 @@ object KyuubiConf { .timeConf .createWithDefaultString("PT30M") - val SERVER_ADMINISTRATORS: ConfigEntry[Seq[String]] = + val SERVER_ADMINISTRATORS: ConfigEntry[Set[String]] = buildConf("kyuubi.server.administrators") .doc("Comma-separated list of Kyuubi service administrators. " + "We use this config to grant admin permission to any service accounts.") .version("1.8.0") .serverOnly .stringConf - .toSequence() - .createWithDefault(Nil) + .toSet() + .createWithDefault(Set.empty) val OPERATION_SPARK_LISTENER_ENABLED: ConfigEntry[Boolean] = buildConf("kyuubi.operation.spark.listener.enabled") @@ -2487,6 +2736,13 @@ object KyuubiConf { .stringConf .createOptional + val ENGINE_JDBC_CONNECTION_PROPAGATECREDENTIAL: ConfigEntry[Boolean] = + buildConf("kyuubi.engine.jdbc.connection.propagateCredential") + .doc("Whether to use the session's user and password to connect to database") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + val ENGINE_JDBC_CONNECTION_USER: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.connection.user") .doc("The user is used for connecting to server") @@ -2523,6 +2779,24 @@ object KyuubiConf { .stringConf .createOptional + val ENGINE_JDBC_INITIALIZE_SQL: ConfigEntry[Seq[String]] = + buildConf("kyuubi.engine.jdbc.initialize.sql") + .doc("SemiColon-separated list of SQL statements to be initialized in the newly created " + + "engine before queries. i.e. use `SELECT 1` to eagerly active JDBCClient.") + .version("1.8.0") + .stringConf + .toSequence(";") + .createWithDefaultString("SELECT 1") + + val ENGINE_JDBC_SESSION_INITIALIZE_SQL: ConfigEntry[Seq[String]] = + buildConf("kyuubi.engine.jdbc.session.initialize.sql") + .doc("SemiColon-separated list of SQL statements to be initialized in the newly created " + + "engine session before queries.") + .version("1.8.0") + .stringConf + .toSequence(";") + .createWithDefault(Nil) + val ENGINE_OPERATION_CONVERT_CATALOG_DATABASE_ENABLED: ConfigEntry[Boolean] = buildConf("kyuubi.engine.operation.convert.catalog.database.enabled") .doc("When set to true, The engine converts the JDBC methods of set/get Catalog " + @@ -2531,6 +2805,53 @@ object KyuubiConf { .booleanConf .createWithDefault(true) + val ENGINE_SUBMIT_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.engine.submit.timeout") + .doc("Period to tolerant Driver Pod ephemerally invisible after submitting. " + + "In some Resource Managers, e.g. 
K8s, the Driver Pod is not visible immediately " + + "after `spark-submit` is returned.") + .version("1.7.1") + .timeConf + .createWithDefaultString("PT30S") + + val ENGINE_KUBERNETES_SUBMIT_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.engine.kubernetes.submit.timeout") + .doc("The engine submit timeout for Kubernetes application.") + .version("1.7.2") + .fallbackConf(ENGINE_SUBMIT_TIMEOUT) + + val ENGINE_YARN_SUBMIT_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.engine.yarn.submit.timeout") + .doc("The engine submit timeout for YARN application.") + .version("1.7.2") + .fallbackConf(ENGINE_SUBMIT_TIMEOUT) + + object YarnUserStrategy extends Enumeration { + type YarnUserStrategy = Value + val NONE, ADMIN, OWNER = Value + } + + val YARN_USER_STRATEGY: ConfigEntry[String] = + buildConf("kyuubi.yarn.user.strategy") + .doc("Determine which user to use to construct YARN client for application management, " + + "e.g. kill application. Options:
    " + + "
  • NONE: use Kyuubi server user.
  • " + + "
  • ADMIN: use admin user configured in `kyuubi.yarn.user.admin`.
  • " + + "
  • OWNER: use session user, typically is application owner.
  • " + + "
") + .version("1.8.0") + .stringConf + .checkValues(YarnUserStrategy) + .createWithDefault("NONE") + + val YARN_USER_ADMIN: ConfigEntry[String] = + buildConf("kyuubi.yarn.user.admin") + .doc(s"When ${YARN_USER_STRATEGY.key} is set to ADMIN, use this admin user to " + + "construct YARN client for application management, e.g. kill application.") + .version("1.8.0") + .stringConf + .createWithDefault("yarn") + /** * Holds information about keys that have been deprecated. * @@ -2602,6 +2923,84 @@ object KyuubiConf { Map(configs.map { cfg => cfg.key -> cfg }: _*) } + val ENGINE_CHAT_MEMORY: ConfigEntry[String] = + buildConf("kyuubi.engine.chat.memory") + .doc("The heap memory for the Chat engine") + .version("1.8.0") + .stringConf + .createWithDefault("1g") + + val ENGINE_CHAT_JAVA_OPTIONS: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.chat.java.options") + .doc("The extra Java options for the Chat engine") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_CHAT_PROVIDER: ConfigEntry[String] = + buildConf("kyuubi.engine.chat.provider") + .doc("The provider for the Chat engine. Candidates:
    " + + "
  • ECHO: simply replies a welcome message.
  • " + + "
  • GPT: a.k.a ChatGPT, powered by OpenAI.
  • " + + "
") + .version("1.8.0") + .stringConf + .transform { + case "ECHO" | "echo" => "org.apache.kyuubi.engine.chat.provider.EchoProvider" + case "GPT" | "gpt" | "ChatGPT" => "org.apache.kyuubi.engine.chat.provider.ChatGPTProvider" + case other => other + } + .createWithDefault("ECHO") + + val ENGINE_CHAT_GPT_API_KEY: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.chat.gpt.apiKey") + .doc("The key to access OpenAI open API, which could be got at " + + "https://platform.openai.com/account/api-keys") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_CHAT_GPT_MODEL: ConfigEntry[String] = + buildConf("kyuubi.engine.chat.gpt.model") + .doc("ID of the model used in ChatGPT. Available models refer to OpenAI's " + + "[Model overview](https://platform.openai.com/docs/models/overview).") + .version("1.8.0") + .stringConf + .createWithDefault("gpt-3.5-turbo") + + val ENGINE_CHAT_EXTRA_CLASSPATH: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.chat.extra.classpath") + .doc("The extra classpath for the Chat engine, for configuring the location " + + "of the SDK and etc.") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_CHAT_GPT_HTTP_PROXY: OptionalConfigEntry[String] = + buildConf("kyuubi.engine.chat.gpt.http.proxy") + .doc("HTTP proxy url for API calling in Chat GPT engine. e.g. http://127.0.0.1:1087") + .version("1.8.0") + .stringConf + .createOptional + + val ENGINE_CHAT_GPT_HTTP_CONNECT_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.engine.chat.gpt.http.connect.timeout") + .doc("The timeout[ms] for establishing the connection with the Chat GPT server. " + + "A timeout value of zero is interpreted as an infinite timeout.") + .version("1.8.0") + .timeConf + .checkValue(_ >= 0, "must be 0 or positive number") + .createWithDefault(Duration.ofSeconds(120).toMillis) + + val ENGINE_CHAT_GPT_HTTP_SOCKET_TIMEOUT: ConfigEntry[Long] = + buildConf("kyuubi.engine.chat.gpt.http.socket.timeout") + .doc("The timeout[ms] for waiting for data packets after Chat GPT server " + + "connection is established. A timeout value of zero is interpreted as an infinite timeout.") + .version("1.8.0") + .timeConf + .checkValue(_ >= 0, "must be 0 or positive number") + .createWithDefault(Duration.ofSeconds(120).toMillis) + val ENGINE_JDBC_MEMORY: ConfigEntry[String] = buildConf("kyuubi.engine.jdbc.memory") .doc("The heap memory for the JDBC query engine") @@ -2658,6 +3057,15 @@ object KyuubiConf { .stringConf .createWithDefault("bin/python") + val ENGINE_SPARK_REGISTER_ATTRIBUTES: ConfigEntry[Seq[String]] = + buildConf("kyuubi.engine.spark.register.attributes") + .internal + .doc("The extra attributes to expose when registering for Spark engine.") + .version("1.8.0") + .stringConf + .toSequence() + .createWithDefault(Seq("spark.driver.memory", "spark.executor.memory")) + val ENGINE_HIVE_EVENT_LOGGERS: ConfigEntry[Seq[String]] = buildConf("kyuubi.engine.hive.event.loggers") .doc("A comma-separated list of engine history loggers, where engine/session/operation etc" + @@ -2668,7 +3076,7 @@ object KyuubiConf { "
  • CUSTOM: to be done.
") .version("1.7.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .toSequence() .checkValue( _.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM")), @@ -2685,7 +3093,7 @@ "
  • CUSTOM: to be done.
  • ") .version("1.7.0") .stringConf - .transform(_.toUpperCase(Locale.ROOT)) + .transformToUpperCase .toSequence() .checkValue( _.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM")), @@ -2713,4 +3121,23 @@ object KyuubiConf { .version("1.7.0") .timeConf .createWithDefault(Duration.ofSeconds(60).toMillis) + + val OPERATION_GET_TABLES_IGNORE_TABLE_PROPERTIES: ConfigEntry[Boolean] = + buildConf("kyuubi.operation.getTables.ignoreTableProperties") + .doc("Speed up the `GetTables` operation by returning table identities only.") + .version("1.8.0") + .booleanConf + .createWithDefault(false) + + val SERVER_LIMIT_ENGINE_CREATION: OptionalConfigEntry[Int] = + buildConf("kyuubi.server.limit.engine.startup") + .internal + .doc("The maximum engine startup concurrency of kyuubi server. Highly concurrent engine" + + " startup processes may lead to high load on the kyuubi server machine," + + " this configuration is used to limit the number of engine startup processes" + + " running at the same time to avoid it.") + .version("1.8.0") + .serverOnly + .intConf + .createOptional } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiReservedKeys.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiReservedKeys.scala index 8b42e659f82..eb209caec99 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiReservedKeys.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiReservedKeys.scala @@ -38,6 +38,8 @@ object KyuubiReservedKeys { "kyuubi.session.engine.launch.handle.guid" final val KYUUBI_SESSION_ENGINE_LAUNCH_HANDLE_SECRET = "kyuubi.session.engine.launch.handle.secret" + final val KYUUBI_SESSION_ENGINE_LAUNCH_SUPPORT_RESULT = + "kyuubi.session.engine.launch.support.result" final val KYUUBI_OPERATION_SET_CURRENT_CATALOG = "kyuubi.operation.set.current.catalog" final val KYUUBI_OPERATION_GET_CURRENT_CATALOG = "kyuubi.operation.get.current.catalog" final val KYUUBI_OPERATION_SET_CURRENT_DATABASE = "kyuubi.operation.set.current.database" diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/engine/EngineType.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/engine/EngineType.scala index 88680a8c757..3d850ba14f5 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/engine/EngineType.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/engine/EngineType.scala @@ -23,5 +23,5 @@ package org.apache.kyuubi.engine object EngineType extends Enumeration { type EngineType = Value - val SPARK_SQL, FLINK_SQL, TRINO, HIVE_SQL, JDBC = Value + val SPARK_SQL, FLINK_SQL, CHAT, TRINO, HIVE_SQL, JDBC = Value } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/AbstractOperation.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/AbstractOperation.scala index d50cb8e243f..0a185b94266 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/AbstractOperation.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/AbstractOperation.scala @@ -18,13 +18,14 @@ package org.apache.kyuubi.operation import java.util.concurrent.{Future, ScheduledExecutorService, TimeUnit} +import java.util.concurrent.locks.ReentrantLock import scala.collection.JavaConverters._ import org.apache.commons.lang3.StringUtils -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TProgressUpdateResp, TProtocolVersion, TRowSet, TStatus, TStatusCode} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp, TProgressUpdateResp, TProtocolVersion, TStatus, TStatusCode} 
-import org.apache.kyuubi.{KyuubiSQLException, Logging} +import org.apache.kyuubi.{KyuubiSQLException, Logging, Utils} import org.apache.kyuubi.config.KyuubiConf.OPERATION_IDLE_TIMEOUT import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.operation.OperationState._ @@ -45,7 +46,11 @@ abstract class AbstractOperation(session: Session) extends Operation with Loggin private var statementTimeoutCleaner: Option[ScheduledExecutorService] = None - protected def cleanup(targetState: OperationState): Unit = state.synchronized { + private val lock: ReentrantLock = new ReentrantLock() + + protected def withLockRequired[T](block: => T): T = Utils.withLockRequired(lock)(block) + + protected def cleanup(targetState: OperationState): Unit = withLockRequired { if (!isTerminalState(state)) { setState(targetState) Option(getBackgroundHandle).foreach(_.cancel(true)) @@ -110,7 +115,7 @@ abstract class AbstractOperation(session: Session) extends Operation with Loggin info(s"Processing ${session.user}'s query[$statementId]: " + s"${state.name} -> ${newState.name}, statement:\n$redactedStatement") startTime = System.currentTimeMillis() - case ERROR | FINISHED | CANCELED | TIMEOUT => + case ERROR | FINISHED | CANCELED | TIMEOUT | CLOSED => completedTime = System.currentTimeMillis() val timeCost = s", time taken: ${(completedTime - startTime) / 1000.0} seconds" info(s"Processing ${session.user}'s query[$statementId]: " + @@ -177,7 +182,12 @@ abstract class AbstractOperation(session: Session) extends Operation with Loggin override def getResultSetMetadata: TGetResultSetMetadataResp - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet + def getNextRowSetInternal(order: FetchOrientation, rowSetSize: Int): TFetchResultsResp + + override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TFetchResultsResp = + withLockRequired { + getNextRowSetInternal(order, rowSetSize) + } /** * convert SQL 'like' pattern to a Java regular expression. diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/FetchIterator.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/FetchIterator.scala index fdada11747b..ada15588759 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/FetchIterator.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/FetchIterator.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.operation /** * Borrowed from Apache Spark, see SPARK-33655 */ -sealed trait FetchIterator[A] extends Iterator[A] { +trait FetchIterator[A] extends Iterator[A] { /** * Begin a fetch block, forward from the current position. 
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/Operation.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/Operation.scala index 6f496c9b806..c20a16f61d0 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/Operation.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/Operation.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.operation import java.util.concurrent.Future -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.operation.log.OperationLog @@ -32,7 +32,7 @@ trait Operation { def close(): Unit def getResultSetMetadata: TGetResultSetMetadataResp - def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet + def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TFetchResultsResp def getSession: Session def getHandle: OperationHandle diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/OperationManager.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/OperationManager.scala index df45e6dee01..38dabcc1a89 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/OperationManager.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/OperationManager.scala @@ -137,18 +137,22 @@ abstract class OperationManager(name: String) extends AbstractService(name) { final def getOperationNextRowSet( opHandle: OperationHandle, order: FetchOrientation, - maxRows: Int): TRowSet = { + maxRows: Int): TFetchResultsResp = { getOperation(opHandle).getNextRowSet(order, maxRows) } def getOperationLogRowSet( opHandle: OperationHandle, order: FetchOrientation, - maxRows: Int): TRowSet = { + maxRows: Int): TFetchResultsResp = { val operationLog = getOperation(opHandle).getOperationLog - operationLog.map(_.read(maxRows)).getOrElse { + val rowSet = operationLog.map(_.read(order, maxRows)).getOrElse { throw KyuubiSQLException(s"$opHandle failed to generate operation log") } + val resp = new TFetchResultsResp(new TStatus(TStatusCode.SUCCESS_STATUS)) + resp.setResults(rowSet) + resp.setHasMoreRows(false) + resp } final def removeExpiredOperations(handles: Seq[OperationHandle]): Seq[Operation] = synchronized { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/PlanOnlyMode.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/PlanOnlyMode.scala index 3e170f05fc0..0407dab6266 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/PlanOnlyMode.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/PlanOnlyMode.scala @@ -41,6 +41,8 @@ case object PhysicalMode extends PlanOnlyMode { val name = "physical" } case object ExecutionMode extends PlanOnlyMode { val name = "execution" } +case object LineageMode extends PlanOnlyMode { val name = "lineage" } + case object NoneMode extends PlanOnlyMode { val name = "none" } case object UnknownMode extends PlanOnlyMode { @@ -64,6 +66,7 @@ object PlanOnlyMode { case OptimizeWithStatsMode.name => OptimizeWithStatsMode case PhysicalMode.name => PhysicalMode case ExecutionMode.name => ExecutionMode + case LineageMode.name => LineageMode case NoneMode.name => NoneMode case other => UnknownMode.mode(other) } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j12DivertAppender.scala 
b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j12DivertAppender.scala index df2ef93d83b..6ea853485f6 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j12DivertAppender.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j12DivertAppender.scala @@ -30,7 +30,7 @@ class Log4j12DivertAppender extends WriterAppender { final private val lo = Logger.getRootLogger .getAllAppenders.asScala - .find(_.isInstanceOf[ConsoleAppender]) + .find(ap => ap.isInstanceOf[ConsoleAppender] || ap.isInstanceOf[RollingFileAppender]) .map(_.asInstanceOf[Appender].getLayout) .getOrElse(new PatternLayout("%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n")) diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j2DivertAppender.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j2DivertAppender.scala index dc4b24a8ca6..0daaeae48a8 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j2DivertAppender.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/Log4j2DivertAppender.scala @@ -24,11 +24,11 @@ import scala.collection.JavaConverters._ import org.apache.logging.log4j.LogManager import org.apache.logging.log4j.core.{Filter, LogEvent, StringLayout} -import org.apache.logging.log4j.core.appender.{AbstractWriterAppender, ConsoleAppender, WriterManager} +import org.apache.logging.log4j.core.appender.{AbstractWriterAppender, ConsoleAppender, RollingFileAppender, WriterManager} import org.apache.logging.log4j.core.filter.AbstractFilter import org.apache.logging.log4j.core.layout.PatternLayout -import org.apache.kyuubi.reflection.DynFields +import org.apache.kyuubi.util.reflect.ReflectUtils._ class Log4j2DivertAppender( name: String, @@ -63,11 +63,8 @@ class Log4j2DivertAppender( } }) - private val writeLock = DynFields.builder() - .hiddenImpl(classOf[AbstractWriterAppender[_]], "readWriteLock") - .build[ReadWriteLock](this) - .get() - .writeLock + private val writeLock = + getField[ReadWriteLock]((classOf[AbstractWriterAppender[_]], this), "readWriteLock").writeLock /** * Overrides AbstractWriterAppender.append(), which does the real logging. 
No need @@ -91,7 +88,9 @@ object Log4j2DivertAppender { def initLayout(): StringLayout = { LogManager.getRootLogger.asInstanceOf[org.apache.logging.log4j.core.Logger] .getAppenders.values().asScala - .find(ap => ap.isInstanceOf[ConsoleAppender] && ap.getLayout.isInstanceOf[StringLayout]) + .find(ap => + (ap.isInstanceOf[ConsoleAppender] || ap.isInstanceOf[RollingFileAppender]) && + ap.getLayout.isInstanceOf[StringLayout]) .map(_.getLayout.asInstanceOf[StringLayout]) .getOrElse(PatternLayout.newBuilder().withPattern( "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n").build()) diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/LogDivertAppender.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/LogDivertAppender.scala index 7d29893039e..58bca992c75 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/LogDivertAppender.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/LogDivertAppender.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.operation.log -import org.slf4j.impl.StaticLoggerBinder +import org.slf4j.LoggerFactory import org.apache.kyuubi.Logging @@ -30,9 +30,8 @@ object LogDivertAppender extends Logging { Log4j12DivertAppender.initialize() } else { warn(s"Unsupported SLF4J binding" + - s" ${StaticLoggerBinder.getSingleton.getLoggerFactoryClassStr}") + s" ${LoggerFactory.getILoggerFactory.getClass.getName}") } } - } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala index e6312d0fb84..7ee803cb39e 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala @@ -29,6 +29,7 @@ import scala.collection.mutable.ListBuffer import org.apache.hive.service.rpc.thrift.{TColumn, TRow, TRowSet, TStringColumn} import org.apache.kyuubi.{KyuubiSQLException, Logging} +import org.apache.kyuubi.operation.FetchOrientation.{FETCH_FIRST, FETCH_NEXT, FetchOrientation} import org.apache.kyuubi.operation.OperationHandle import org.apache.kyuubi.session.Session import org.apache.kyuubi.util.ThriftUtils @@ -86,7 +87,7 @@ object OperationLog extends Logging { class OperationLog(path: Path) { private lazy val writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8) - private lazy val reader = Files.newBufferedReader(path, StandardCharsets.UTF_8) + private var reader: BufferedReader = _ @volatile private var initialized: Boolean = false @@ -95,6 +96,15 @@ class OperationLog(path: Path) { private var lastSeekReadPos = 0 private var seekableReader: SeekableBufferedReader = _ + def getReader(): BufferedReader = { + if (reader == null) { + try { + reader = Files.newBufferedReader(path, StandardCharsets.UTF_8) + } catch handleFileNotFound + } + reader + } + def addExtraLog(path: Path): Unit = synchronized { try { extraReaders += Files.newBufferedReader(path, StandardCharsets.UTF_8) @@ -130,19 +140,23 @@ class OperationLog(path: Path) { val logs = new JArrayList[String] var i = 0 try { - var line: String = reader.readLine() - while ((i < lastRows || maxRows <= 0) && line != null) { - logs.add(line) + var line: String = null + do { line = reader.readLine() - i += 1 - } - (logs, i) - } catch { - case e: IOException => - val absPath = path.toAbsolutePath - val opHandle = absPath.getFileName - throw KyuubiSQLException(s"Operation[$opHandle] log file $absPath is not found", e) - } + if (line != null) { + 
logs.add(line) + i += 1 + } + } while ((i < lastRows || maxRows <= 0) && line != null) + } catch handleFileNotFound + (logs, i) + } + + private def handleFileNotFound: PartialFunction[Throwable, Unit] = { + case e: IOException => + val absPath = path.toAbsolutePath + val opHandle = absPath.getFileName + throw KyuubiSQLException(s"Operation[$opHandle] log file $absPath is not found", e) } private def toRowSet(logs: JList[String]): TRowSet = { @@ -152,14 +166,25 @@ class OperationLog(path: Path) { tRow } + def read(maxRows: Int): TRowSet = synchronized { + read(FETCH_NEXT, maxRows) + } + /** * Read to log file line by line * * @param maxRows maximum result number can reach + * @param order the fetch orientation of the result, can be FETCH_NEXT, FETCH_FIRST */ - def read(maxRows: Int): TRowSet = synchronized { + def read(order: FetchOrientation = FETCH_NEXT, maxRows: Int): TRowSet = synchronized { if (!initialized) return ThriftUtils.newEmptyRowSet - val (logs, lines) = readLogs(reader, maxRows, maxRows) + if (order != FETCH_NEXT && order != FETCH_FIRST) { + throw KyuubiSQLException(s"$order in operation log is not supported") + } + if (order == FETCH_FIRST) { + resetReader() + } + val (logs, lines) = readLogs(getReader(), maxRows, maxRows) var lastRows = maxRows - lines for (extraReader <- extraReaders if lastRows > 0 || maxRows <= 0) { val (extraLogs, extraRows) = readLogs(extraReader, lastRows, maxRows) @@ -170,6 +195,19 @@ class OperationLog(path: Path) { toRowSet(logs) } + private def resetReader(): Unit = { + trySafely { + if (reader != null) { + reader.close() + } + } + reader = null + closeExtraReaders() + extraReaders.clear() + extraPaths.foreach(path => + extraReaders += Files.newBufferedReader(path, StandardCharsets.UTF_8)) + } + def read(from: Int, size: Int): TRowSet = synchronized { if (!initialized) return ThriftUtils.newEmptyRowSet var pos = from @@ -195,10 +233,14 @@ class OperationLog(path: Path) { } def close(): Unit = synchronized { + if (!initialized) return + closeExtraReaders() trySafely { - reader.close() + if (reader != null) { + reader.close() + } } trySafely { writer.close() @@ -212,7 +254,7 @@ class OperationLog(path: Path) { } trySafely { - Files.delete(path) + Files.deleteIfExists(path) } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/AbstractBackendService.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/AbstractBackendService.scala index 171e0490137..443b353546e 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/AbstractBackendService.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/AbstractBackendService.scala @@ -201,7 +201,7 @@ abstract class AbstractBackendService(name: String) operationHandle: OperationHandle, orientation: FetchOrientation, maxRows: Int, - fetchLog: Boolean): TRowSet = { + fetchLog: Boolean): TFetchResultsResp = { maxRowsLimit.foreach(limit => if (maxRows > limit) { throw new IllegalArgumentException(s"Max rows for fetching results " + diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/BackendService.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/BackendService.scala index 968a94197d2..85df9024cc4 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/BackendService.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/BackendService.scala @@ -101,7 +101,7 @@ trait BackendService { operationHandle: OperationHandle, orientation: FetchOrientation, maxRows: Int, - fetchLog: Boolean): TRowSet + fetchLog: Boolean): 
TFetchResultsResp def sessionManager: SessionManager } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TBinaryFrontendService.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TBinaryFrontendService.scala index 2e8a8b765e2..2f441937476 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TBinaryFrontendService.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TBinaryFrontendService.scala @@ -134,7 +134,7 @@ abstract class TBinaryFrontendService(name: String) keyStorePassword: String, keyStoreType: Option[String], keyStoreAlgorithm: Option[String], - disallowedSslProtocols: Seq[String], + disallowedSslProtocols: Set[String], includeCipherSuites: Seq[String]): TServerSocket = { val params = if (includeCipherSuites.nonEmpty) { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TFrontendService.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TFrontendService.scala index 16d5c24f973..7cc23779fee 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TFrontendService.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/TFrontendService.scala @@ -31,7 +31,7 @@ import org.apache.thrift.transport.TTransport import org.apache.kyuubi.{KyuubiSQLException, Logging, Utils} import org.apache.kyuubi.Utils.stringifyException -import org.apache.kyuubi.config.KyuubiConf.FRONTEND_CONNECTION_URL_USE_HOSTNAME +import org.apache.kyuubi.config.KyuubiConf.{FRONTEND_ADVERTISED_HOST, FRONTEND_CONNECTION_URL_USE_HOSTNAME, SESSION_CLOSE_ON_DISCONNECT} import org.apache.kyuubi.config.KyuubiReservedKeys._ import org.apache.kyuubi.operation.{FetchOrientation, OperationHandle} import org.apache.kyuubi.service.authentication.KyuubiAuthenticationFactory @@ -112,12 +112,12 @@ abstract class TFrontendService(name: String) override def connectionUrl: String = { checkInitialized() - val host = serverHost match { - case Some(h) => h // respect user's setting ahead - case None if conf.get(FRONTEND_CONNECTION_URL_USE_HOSTNAME) => + val host = (conf.get(FRONTEND_ADVERTISED_HOST), serverHost) match { + case (Some(advertisedHost), _) => advertisedHost + case (None, Some(h)) => h + case (None, None) if conf.get(FRONTEND_CONNECTION_URL_USE_HOSTNAME) => serverAddr.getCanonicalHostName - case None => - serverAddr.getHostAddress + case (None, None) => serverAddr.getHostAddress } host + ":" + actualPort @@ -520,23 +520,20 @@ abstract class TFrontendService(name: String) override def FetchResults(req: TFetchResultsReq): TFetchResultsResp = { debug(req.toString) - val resp = new TFetchResultsResp try { val operationHandle = OperationHandle(req.getOperationHandle) val orientation = FetchOrientation.getFetchOrientation(req.getOrientation) // 1 means fetching log val fetchLog = req.getFetchType == 1 val maxRows = req.getMaxRows.toInt - val rowSet = be.fetchResults(operationHandle, orientation, maxRows, fetchLog) - resp.setResults(rowSet) - resp.setHasMoreRows(false) - resp.setStatus(OK_STATUS) + be.fetchResults(operationHandle, orientation, maxRows, fetchLog) } catch { case e: Exception => error("Error fetching results: ", e) + val resp = new TFetchResultsResp resp.setStatus(KyuubiSQLException.toTStatus(e)) + resp } - resp } protected def notSupportTokenErrorStatus = { @@ -608,7 +605,14 @@ abstract class TFrontendService(name: String) if (handle != null) { info(s"Session [$handle] disconnected without closing properly, close it now") try { - be.closeSession(handle) + val needToClose = 
be.sessionManager.getSession(handle).conf + .getOrElse(SESSION_CLOSE_ON_DISCONNECT.key, "true").toBoolean + if (needToClose) { + be.closeSession(handle) + } else { + warn(s"Session not actually closed because configuration " + + s"${SESSION_CLOSE_ON_DISCONNECT.key} is set to false") + } } catch { case e: KyuubiSQLException => error("Failed closing session", e) diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/EngineSecuritySecretProvider.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/EngineSecuritySecretProvider.scala index 2bcfe9a676b..3216a43be7f 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/EngineSecuritySecretProvider.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/EngineSecuritySecretProvider.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.service.authentication import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.util.reflect.DynConstructors trait EngineSecuritySecretProvider { @@ -50,9 +51,10 @@ class SimpleEngineSecuritySecretProviderImpl extends EngineSecuritySecretProvide object EngineSecuritySecretProvider { def create(conf: KyuubiConf): EngineSecuritySecretProvider = { - val providerClass = Class.forName(conf.get(ENGINE_SECURITY_SECRET_PROVIDER)) - val provider = providerClass.getConstructor().newInstance() - .asInstanceOf[EngineSecuritySecretProvider] + val provider = DynConstructors.builder() + .impl(conf.get(ENGINE_SECURITY_SECRET_PROVIDER)) + .buildChecked[EngineSecuritySecretProvider]() + .newInstance(conf) provider.initialize(conf) provider } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/InternalSecurityAccessor.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/InternalSecurityAccessor.scala index 62680e6a610..afc1dde1fd0 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/InternalSecurityAccessor.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/InternalSecurityAccessor.scala @@ -20,6 +20,8 @@ package org.apache.kyuubi.service.authentication import javax.crypto.Cipher import javax.crypto.spec.{IvParameterSpec, SecretKeySpec} +import org.apache.hadoop.classification.VisibleForTesting + import org.apache.kyuubi.{KyuubiSQLException, Logging} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ @@ -121,4 +123,9 @@ object InternalSecurityAccessor extends Logging { def get(): InternalSecurityAccessor = { _engineSecurityAccessor } + + @VisibleForTesting + def reset(): Unit = { + _engineSecurityAccessor = null + } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactory.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactory.scala index 5f429fa4ed7..1b62f6030e7 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactory.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactory.scala @@ -39,7 +39,7 @@ class KyuubiAuthenticationFactory(conf: KyuubiConf, isServer: Boolean = true) ex private val authTypes = conf.get(AUTHENTICATION_METHOD).map(AuthTypes.withName) private val none = authTypes.contains(NONE) - private val noSasl = authTypes == Seq(NOSASL) + private val noSasl = authTypes == Set(NOSASL) private val 
kerberosEnabled = authTypes.contains(KERBEROS) private val plainAuthTypeOpt = authTypes.filterNot(_.equals(KERBEROS)) .filterNot(_.equals(NOSASL)).headOption diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImpl.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImpl.scala index 06d08f3e472..d885da55b23 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImpl.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImpl.scala @@ -27,6 +27,7 @@ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.service.ServiceUtils import org.apache.kyuubi.service.authentication.LdapAuthenticationProviderImpl.FILTER_FACTORIES import org.apache.kyuubi.service.authentication.ldap._ +import org.apache.kyuubi.service.authentication.ldap.LdapUtils.getUserName class LdapAuthenticationProviderImpl( conf: KyuubiConf, @@ -70,7 +71,8 @@ class LdapAuthenticationProviderImpl( if (usedBind) { // If we used the bind user, then we need to authenticate again, // this time using the full user name we got during the bind process. - createDirSearch(search.findUserDn(user), password) + val username = getUserName(user) + createDirSearch(search.findUserDn(username), password) } } catch { case e: NamingException => @@ -108,8 +110,7 @@ class LdapAuthenticationProviderImpl( @throws[AuthenticationException] private def applyFilter(client: DirSearch, user: String): Unit = filterOpt.foreach { filter => - val username = if (LdapUtils.hasDomain(user)) LdapUtils.extractUserName(user) else user - filter.apply(client, username) + filter.apply(client, getUserName(user)) } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/PlainSASLServer.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/PlainSASLServer.scala index 8e84c9f81ec..737a6d8cd2b 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/PlainSASLServer.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/PlainSASLServer.scala @@ -23,7 +23,7 @@ import javax.security.auth.callback.{Callback, CallbackHandler, NameCallback, Pa import javax.security.sasl.{AuthorizeCallback, SaslException, SaslServer, SaslServerFactory} import org.apache.kyuubi.KYUUBI_VERSION -import org.apache.kyuubi.engine.SemanticVersion +import org.apache.kyuubi.util.SemanticVersion class PlainSASLServer( handler: CallbackHandler, @@ -126,10 +126,7 @@ object PlainSASLServer { } } - final private val version: Double = { - val runtimeVersion = SemanticVersion(KYUUBI_VERSION) - runtimeVersion.majorVersion + runtimeVersion.minorVersion.toDouble / 10 - } + final private val version = SemanticVersion(KYUUBI_VERSION).toDouble class SaslPlainProvider extends Provider("KyuubiSaslPlain", version, "Kyuubi Plain SASL provider") { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/GroupFilterFactory.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/GroupFilterFactory.scala index fd1c907eccd..f3048ea6fed 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/GroupFilterFactory.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/GroupFilterFactory.scala @@ -38,7 +38,7 @@ object GroupFilterFactory extends FilterFactory { } } -class 
GroupMembershipKeyFilter(groupFilter: Seq[String]) extends Filter with Logging { +class GroupMembershipKeyFilter(groupFilter: Set[String]) extends Filter with Logging { @throws[AuthenticationException] override def apply(ldap: DirSearch, user: String): Unit = { @@ -70,7 +70,7 @@ class GroupMembershipKeyFilter(groupFilter: Seq[String]) extends Filter with Log } } -class UserMembershipKeyFilter(groupFilter: Seq[String]) extends Filter with Logging { +class UserMembershipKeyFilter(groupFilter: Set[String]) extends Filter with Logging { @throws[AuthenticationException] override def apply(ldap: DirSearch, user: String): Unit = { info(s"Authenticating user '$user' using $classOf[UserMembershipKeyFilter].getSimpleName") diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/LdapUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/LdapUtils.scala index a48f9f48f2b..e304e96f733 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/LdapUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/LdapUtils.scala @@ -105,12 +105,25 @@ object LdapUtils extends Logging { * * * @param userName username - * @return true if `userName`` contains `@` part + * @return true if `userName` contains `@` part */ def hasDomain(userName: String): Boolean = { ServiceUtils.indexOfDomainMatch(userName) > 0 } + /** + * Get the username part in the provided user. + *
    + * Example: + *
    + * For user "user1@mycorp.com" this method will return "user1" + * + * @param user user + * @return the username part in the provided user + */ + def getUserName(user: String): String = + if (LdapUtils.hasDomain(user)) LdapUtils.extractUserName(user) else user + /** * Detects DN names. *
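The new `LdapUtils.getUserName` helper above is what both the bind-authentication path and the filters now go through. A minimal, self-contained sketch of its intended behaviour (the real method delegates to `ServiceUtils.indexOfDomainMatch` and `LdapUtils.extractUserName`; the simple `indexOf('@')` split here is an illustration only):

```scala
object GetUserNameSketch {
  // Strip an optional "@domain" suffix so LDAP searches and filters see the
  // bare account name, mirroring the behaviour documented for getUserName.
  def getUserName(user: String): String = {
    val at = user.indexOf('@')
    if (at > 0) user.substring(0, at) else user
  }

  def main(args: Array[String]): Unit = {
    assert(getUserName("user1@mycorp.com") == "user1") // example from the scaladoc above
    assert(getUserName("user1") == "user1")            // no domain part: returned unchanged
  }
}
```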
    diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/UserFilterFactory.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/UserFilterFactory.scala index 7c2f22ed869..3af3c66f564 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/UserFilterFactory.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/service/authentication/ldap/UserFilterFactory.scala @@ -29,13 +29,13 @@ object UserFilterFactory extends FilterFactory with Logging { } } -class UserFilter(_userFilter: Seq[String]) extends Filter with Logging { +class UserFilter(_userFilter: Set[String]) extends Filter with Logging { - lazy val userFilter: Seq[String] = _userFilter.map(_.toLowerCase) + lazy val userFilter: Set[String] = _userFilter.map(_.toLowerCase) @throws[AuthenticationException] override def apply(ldap: DirSearch, user: String): Unit = { - info("Authenticating user '$user' using user filter") + info(s"Authenticating user '$user' using user filter") val userName = LdapUtils.extractUserName(user).toLowerCase if (!userFilter.contains(userName)) { info("Authentication failed based on user membership") diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/AbstractSession.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/AbstractSession.scala index 1a8c51ccd0b..a9e33f5a060 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/AbstractSession.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/AbstractSession.scala @@ -233,7 +233,7 @@ abstract class AbstractSession( operationHandle: OperationHandle, orientation: FetchOrientation, maxRows: Int, - fetchLog: Boolean): TRowSet = { + fetchLog: Boolean): TFetchResultsResp = { if (fetchLog) { sessionManager.operationManager.getOperationLogRowSet(operationHandle, orientation, maxRows) } else { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/Session.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/Session.scala index bc9f9a8f695..2cdac9f3a78 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/Session.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/Session.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.session -import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TGetResultSetMetadataResp, TProtocolVersion, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetInfoType, TGetInfoValue, TGetResultSetMetadataResp, TProtocolVersion} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.operation.OperationHandle @@ -91,7 +91,7 @@ trait Session { operationHandle: OperationHandle, orientation: FetchOrientation, maxRows: Int, - fetchLog: Boolean): TRowSet + fetchLog: Boolean): TFetchResultsResp def closeExpiredOperations(): Unit } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/SessionManager.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/SessionManager.scala index f8e77dd63b4..6cf1f082b96 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/SessionManager.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/SessionManager.scala @@ -209,11 +209,11 @@ abstract class SessionManager(name: String) extends CompositeService(name) { key } - if (_confRestrictMatchList.exists(normalizedKey.startsWith(_)) || + if (_confRestrictMatchList.exists(normalizedKey.startsWith) || _confRestrictList.contains(normalizedKey)) { throw 
KyuubiSQLException(s"$normalizedKey is a restrict key according to the server-side" + s" configuration, please remove it and retry if you want to proceed") - } else if (_confIgnoreMatchList.exists(normalizedKey.startsWith(_)) || + } else if (_confIgnoreMatchList.exists(normalizedKey.startsWith) || _confIgnoreList.contains(normalizedKey)) { warn(s"$normalizedKey is a ignored key according to the server-side configuration") None @@ -228,7 +228,7 @@ abstract class SessionManager(name: String) extends CompositeService(name) { // validate whether if a batch key should be ignored def validateBatchKey(key: String, value: String): Option[(String, String)] = { - if (_batchConfIgnoreMatchList.exists(key.startsWith(_)) || _batchConfIgnoreList.contains(key)) { + if (_batchConfIgnoreMatchList.exists(key.startsWith) || _batchConfIgnoreList.contains(key)) { warn(s"$key is a ignored batch key according to the server-side configuration") None } else { @@ -265,10 +265,10 @@ abstract class SessionManager(name: String) extends CompositeService(name) { conf.get(ENGINE_EXEC_KEEPALIVE_TIME) } - _confRestrictList = conf.get(SESSION_CONF_RESTRICT_LIST).toSet - _confIgnoreList = conf.get(SESSION_CONF_IGNORE_LIST).toSet + + _confRestrictList = conf.get(SESSION_CONF_RESTRICT_LIST) + _confIgnoreList = conf.get(SESSION_CONF_IGNORE_LIST) + s"${SESSION_USER_SIGN_ENABLED.key}" - _batchConfIgnoreList = conf.get(BATCH_CONF_IGNORE_LIST).toSet + _batchConfIgnoreList = conf.get(BATCH_CONF_IGNORE_LIST) execPool = ThreadUtils.newDaemonQueuedThreadPool( poolSize, @@ -288,9 +288,9 @@ abstract class SessionManager(name: String) extends CompositeService(name) { shutdown = true val shutdownTimeout: Long = if (isServer) { - conf.get(ENGINE_EXEC_POOL_SHUTDOWN_TIMEOUT) - } else { conf.get(SERVER_EXEC_POOL_SHUTDOWN_TIMEOUT) + } else { + conf.get(ENGINE_EXEC_POOL_SHUTDOWN_TIMEOUT) } ThreadUtils.shutdown(timeoutChecker, Duration(shutdownTimeout, TimeUnit.MILLISECONDS)) @@ -307,6 +307,8 @@ abstract class SessionManager(name: String) extends CompositeService(name) { for (session <- handleToSession.values().asScala) { if (session.lastAccessTime + session.sessionIdleTimeoutThreshold <= current && session.getNoOperationTime > session.sessionIdleTimeoutThreshold) { + info(s"Closing session ${session.handle.identifier} that has been idle for more" + + s" than ${session.sessionIdleTimeoutThreshold} ms") try { closeSession(session.handle) } catch { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala index 40abded985c..63b17dd4d2e 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala @@ -25,6 +25,8 @@ package object session { val HIVECONF_PREFIX = "hiveconf:" val HIVEVAR_PREFIX = "hivevar:" val METACONF_PREFIX = "metaconf:" + val USE_CATALOG = "use:catalog" + val USE_DATABASE = "use:database" val SPARK_PREFIX = "spark." 
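As a side note on the `SessionManager.validateKey` / `validateBatchKey` changes a few hunks above: exact entries are checked with `contains`, prefix-style entries with `exists(startsWith)`. A small stand-alone sketch of that pattern; the concrete list values are made up and only the matching logic mirrors the code:

```scala
object RestrictKeySketch {
  // Hypothetical entries: exact keys versus prefix entries, playing the roles
  // of _confRestrictList and _confRestrictMatchList in SessionManager.
  val restrictList: Set[String] = Set("spark.driver.memory")
  val restrictMatchList: Set[String] = Set("spark.kyuubi.")

  def isRestricted(key: String): Boolean =
    restrictMatchList.exists(key.startsWith) || restrictList.contains(key)

  def main(args: Array[String]): Unit = {
    assert(isRestricted("spark.driver.memory"))       // exact match
    assert(isRestricted("spark.kyuubi.engine.share")) // prefix match
    assert(!isRestricted("spark.executor.cores"))     // neither restricted nor matched
  }
}
```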
} diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ClassUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ClassUtils.scala index bcbfdabfba4..d8eda34261a 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ClassUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ClassUtils.scala @@ -17,10 +17,9 @@ package org.apache.kyuubi.util -import scala.util.Try - import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.util.reflect._ object ClassUtils { @@ -34,28 +33,16 @@ object ClassUtils { */ def createInstance[T](className: String, expected: Class[T], conf: KyuubiConf): T = { val classLoader = Thread.currentThread.getContextClassLoader - val cls = Class.forName(className, true, classLoader) - cls match { - case clazz if expected.isAssignableFrom(cls) => - val confConstructor = clazz.getConstructors.exists(p => { - val params = p.getParameterTypes - params.length == 1 && classOf[KyuubiConf].isAssignableFrom(params(0)) - }) - if (confConstructor) { - clazz.getConstructor(classOf[KyuubiConf]).newInstance(conf) - .asInstanceOf[T] - } else { - clazz.newInstance().asInstanceOf[T] - } - case _ => throw new KyuubiException( - s"$className must extend of ${expected.getName}") + try { + DynConstructors.builder(expected).loader(classLoader) + .impl(className, classOf[KyuubiConf]) + .impl(className) + .buildChecked[T]() + .newInstance(conf) + } catch { + case e: Exception => + throw new KyuubiException(s"$className must extend of ${expected.getName}", e) } } - /** Determines whether the provided class is loadable. */ - def classIsLoadable( - clazz: String, - cl: ClassLoader = Thread.currentThread().getContextClassLoader): Boolean = { - Try { Class.forName(clazz, false, cl) }.isSuccess - } } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/JdbcUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/JdbcUtils.scala index b89580f4c8d..996589cb742 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/JdbcUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/JdbcUtils.scala @@ -108,7 +108,8 @@ object JdbcUtils extends Logging { def isDuplicatedKeyDBErr(cause: Throwable): Boolean = { val duplicatedKeyKeywords = Seq( "duplicate key value in a unique or primary key constraint or unique index", // Derby - "Duplicate entry" // MySQL + "Duplicate entry", // MySQL + "A UNIQUE constraint failed" // SQLite ) duplicatedKeyKeywords.exists(cause.getMessage.contains) } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala index a63646d9bd2..4959c845d49 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala @@ -26,24 +26,17 @@ import scala.util.{Failure, Success, Try} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier import org.apache.hadoop.io.Text -import org.apache.hadoop.security.{Credentials, SecurityUtil, UserGroupInformation} +import org.apache.hadoop.security.{Credentials, SecurityUtil} import org.apache.hadoop.security.token.{Token, TokenIdentifier} import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier import org.apache.hadoop.yarn.conf.YarnConfiguration import org.apache.kyuubi.Logging import 
org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.util.reflect.ReflectUtils._ object KyuubiHadoopUtils extends Logging { - private val subjectField = - classOf[UserGroupInformation].getDeclaredField("subject") - subjectField.setAccessible(true) - - private val tokenMapField = - classOf[Credentials].getDeclaredField("tokenMap") - tokenMapField.setAccessible(true) - def newHadoopConf( conf: KyuubiConf, loadDefaults: Boolean = true): Configuration = { @@ -81,12 +74,8 @@ object KyuubiHadoopUtils extends Logging { * Get [[Credentials#tokenMap]] by reflection as [[Credentials#getTokenMap]] is not present before * Hadoop 3.2.1. */ - def getTokenMap(credentials: Credentials): Map[Text, Token[_ <: TokenIdentifier]] = { - tokenMapField.get(credentials) - .asInstanceOf[JMap[Text, Token[_ <: TokenIdentifier]]] - .asScala - .toMap - } + def getTokenMap(credentials: Credentials): Map[Text, Token[_ <: TokenIdentifier]] = + getField[JMap[Text, Token[_ <: TokenIdentifier]]](credentials, "tokenMap").asScala.toMap def getTokenIssueDate(token: Token[_ <: TokenIdentifier]): Option[Long] = { token.decodeIdentifier() match { diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiUncaughtExceptionHandler.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiUncaughtExceptionHandler.scala new file mode 100644 index 00000000000..69cfe207f4c --- /dev/null +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiUncaughtExceptionHandler.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
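The `getField[...](credentials, "tokenMap")` call above replaces the hand-rolled cached `Field` lookup that was deleted. A rough, self-contained equivalent using plain `java.lang.reflect` on a hypothetical holder class (this is not the actual `ReflectUtils` API, just the idea behind it):

```scala
object GetFieldSketch {
  // Hypothetical holder with a private field, standing in for Hadoop's Credentials.
  class Holder {
    private val secrets: java.util.Map[String, String] = new java.util.HashMap[String, String]()
  }

  // Roughly what a getField helper does: locate the declared field,
  // make it accessible, and cast the value to the requested type.
  def getField[T](target: AnyRef, name: String): T = {
    val field = target.getClass.getDeclaredField(name)
    field.setAccessible(true)
    field.get(target).asInstanceOf[T]
  }

  def main(args: Array[String]): Unit = {
    val holder = new Holder
    val secrets = getField[java.util.Map[String, String]](holder, "secrets")
    secrets.put("k", "v")
    assert(secrets.get("k") == "v")
  }
}
```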
+ */ + +package org.apache.kyuubi.util + +import java.lang.Thread.UncaughtExceptionHandler + +import org.apache.kyuubi.Logging + +class KyuubiUncaughtExceptionHandler extends UncaughtExceptionHandler with Logging { + override def uncaughtException(t: Thread, e: Throwable): Unit = { + error(s"Uncaught exception in thread ${t.getName}", e) + } +} diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/NamedThreadFactory.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/NamedThreadFactory.scala index 89c3c96ea75..3ce421e2350 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/NamedThreadFactory.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/NamedThreadFactory.scala @@ -20,10 +20,17 @@ package org.apache.kyuubi.util import java.util.concurrent.ThreadFactory class NamedThreadFactory(name: String, daemon: Boolean) extends ThreadFactory { + import NamedThreadFactory._ + override def newThread(r: Runnable): Thread = { val t = new Thread(r) t.setName(name + ": Thread-" + t.getId) t.setDaemon(daemon) + t.setUncaughtExceptionHandler(kyuubiUncaughtExceptionHandler) t } } + +object NamedThreadFactory { + private[util] val kyuubiUncaughtExceptionHandler = new KyuubiUncaughtExceptionHandler +} diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/RowSetUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/RowSetUtils.scala index fca79c0f2a5..f320fd90293 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/RowSetUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/RowSetUtils.scala @@ -18,7 +18,7 @@ package org.apache.kyuubi.util import java.nio.ByteBuffer -import java.time.{Instant, LocalDate, LocalDateTime, ZoneId} +import java.time.{Instant, LocalDate, LocalDateTime, LocalTime, ZoneId} import java.time.chrono.IsoChronology import java.time.format.DateTimeFormatterBuilder import java.time.temporal.ChronoField @@ -41,6 +41,12 @@ private[kyuubi] object RowSetUtils { private lazy val legacyDateFormatter = FastDateFormat.getInstance("yyyy-MM-dd", Locale.US) + private lazy val timeFormatter = createDateTimeFormatterBuilder() + .appendPattern("HH:mm:ss") + .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true) + .toFormatter(Locale.US) + .withChronology(IsoChronology.INSTANCE) + private lazy val timestampFormatter = createDateTimeFormatterBuilder() .appendPattern("yyyy-MM-dd HH:mm:ss") .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true) @@ -59,6 +65,10 @@ private[kyuubi] object RowSetUtils { dateFormatter.format(ld) } + def formatLocalTime(lt: LocalTime): String = { + timeFormatter.format(lt) + } + def formatLocalDateTime(ldt: LocalDateTime): String = { timestampFormatter.format(ldt) } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/SignUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/SignUtils.scala index 6f7ff18df67..7fb4fde2e96 100644 --- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/SignUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/SignUtils.scala @@ -27,7 +27,7 @@ object SignUtils { private lazy val ecKeyPairGenerator = { val g = KeyPairGenerator.getInstance(KEYPAIR_ALGORITHM_EC) - g.initialize(new ECGenParameterSpec("secp256k1"), new SecureRandom()) + g.initialize(new ECGenParameterSpec("secp521r1"), new SecureRandom()) g } diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ThreadUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ThreadUtils.scala index 8ce4bb2e589..76d3f416f84 100644 --- 
a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ThreadUtils.scala +++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/ThreadUtils.scala @@ -95,4 +95,18 @@ object ThreadUtils extends Logging { } } } + + def runInNewThread( + threadName: String, + isDaemon: Boolean = true)(body: => Unit): Unit = { + + val thread = new Thread(threadName) { + override def run(): Unit = { + body + } + } + thread.setDaemon(isDaemon) + thread.setUncaughtExceptionHandler(NamedThreadFactory.kyuubiUncaughtExceptionHandler) + thread.start() + } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/HiveEngineTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/HiveEngineTests.scala index 9eb4a24405b..028f755f6c8 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/HiveEngineTests.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/HiveEngineTests.scala @@ -140,7 +140,7 @@ trait HiveEngineTests extends HiveJDBCTestHelper { try { val meta = statement.getConnection.getMetaData var resultSet = meta.getColumns(null, null, null, null) - var resultSetBuffer = ArrayBuffer[(String, String, String, String, String)]() + val resultSetBuffer = ArrayBuffer[(String, String, String, String, String)]() while (resultSet.next()) { resultSetBuffer += Tuple5( resultSet.getString(TABLE_CAT), @@ -434,8 +434,8 @@ trait HiveEngineTests extends HiveJDBCTestHelper { val res = statement.getConnection.getMetaData.getClientInfoProperties assert(res.next()) assert(res.getString(1) === "ApplicationName") - assert(res.getInt("MAX_LEN") === 1000); - assert(!res.next()); + assert(res.getInt("MAX_LEN") === 1000) + assert(!res.next()) val connection = statement.getConnection connection.setClientInfo("ApplicationName", "test kyuubi hive jdbc") diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/HudiSuiteMixin.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/HudiSuiteMixin.scala deleted file mode 100644 index 17cc5d27fe7..00000000000 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/HudiSuiteMixin.scala +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
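The `NamedThreadFactory` and `ThreadUtils.runInNewThread` changes above both route thread failures through `KyuubiUncaughtExceptionHandler`. A compact sketch of that pattern, assuming nothing beyond the JDK (the handler and thread names here are illustrative, not Kyuubi APIs):

```scala
object UncaughtHandlerSketch {
  // A logging handler comparable in spirit to KyuubiUncaughtExceptionHandler above.
  private val handler = new Thread.UncaughtExceptionHandler {
    override def uncaughtException(t: Thread, e: Throwable): Unit =
      System.err.println(s"Uncaught exception in thread ${t.getName}: ${e.getMessage}")
  }

  // Roughly what the new ThreadUtils.runInNewThread does: run the body in a
  // named daemon thread whose failures are reported through the handler
  // instead of being silently dropped.
  def runInNewThread(name: String, daemon: Boolean = true)(body: => Unit): Unit = {
    val thread = new Thread(name) { override def run(): Unit = body }
    thread.setDaemon(daemon)
    thread.setUncaughtExceptionHandler(handler)
    thread.start()
  }

  def main(args: Array[String]): Unit = {
    runInNewThread("sketch-thread") { throw new IllegalStateException("boom") }
    Thread.sleep(200) // give the handler a moment to report before the JVM exits
  }
}
```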
- */ - -package org.apache.kyuubi - -import java.nio.file.Path - -trait HudiSuiteMixin extends DataLakeSuiteMixin { - - override protected def format: String = "hudi" - - override protected def catalog: String = "spark_catalog" - - override protected def warehouse: Path = Utils.createTempDir() - - override protected def extraJars: String = { - System.getProperty("java.class.path") - .split(":") - .filter(i => i.contains("hudi") || i.contains("spark-avro")) - .mkString(",") - } - - override protected def extraConfigs = Map( - "spark.sql.catalogImplementation" -> "in-memory", - "spark.sql.defaultCatalog" -> catalog, - "spark.sql.extensions" -> "org.apache.spark.sql.hudi.HoodieSparkSessionExtension", - "spark.serializer" -> "org.apache.spark.serializer.KryoSerializer", - "spark.jars" -> extraJars) -} diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/KyuubiFunSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/KyuubiFunSuite.scala index 96a612aabac..8d0a14c1698 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/KyuubiFunSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/KyuubiFunSuite.scala @@ -30,6 +30,7 @@ import org.scalatest.funsuite.AnyFunSuite import org.slf4j.bridge.SLF4JBridgeHandler import org.apache.kyuubi.config.internal.Tests.IS_TESTING +import org.apache.kyuubi.service.authentication.InternalSecurityAccessor trait KyuubiFunSuite extends AnyFunSuite with BeforeAndAfterAll @@ -46,6 +47,7 @@ trait KyuubiFunSuite extends AnyFunSuite override def beforeAll(): Unit = { System.setProperty(IS_TESTING.key, "true") doThreadPreAudit() + InternalSecurityAccessor.reset() super.beforeAll() } @@ -102,6 +104,7 @@ trait KyuubiFunSuite extends AnyFunSuite logger.asInstanceOf[Logger].setLevel(restoreLevels(i)) logger.asInstanceOf[Logger].get().setLevel(restoreLevels(i)) } + LogManager.getContext(false).asInstanceOf[LoggerContext].updateLoggers() } } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/MarkdownUtils.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/MarkdownUtils.scala index 45568df25e6..4dbe6ea6711 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/MarkdownUtils.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/MarkdownUtils.scala @@ -17,81 +17,39 @@ package org.apache.kyuubi -import java.nio.charset.StandardCharsets -import java.nio.file.{Files, Path, StandardOpenOption} - -import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer -import scala.compat.Platform.EOL +import scala.collection.mutable.ListBuffer import com.vladsch.flexmark.formatter.Formatter import com.vladsch.flexmark.parser.{Parser, ParserEmulationProfile, PegdownExtensions} import com.vladsch.flexmark.profile.pegdown.PegdownOptionsAdapter import com.vladsch.flexmark.util.data.{MutableDataHolder, MutableDataSet} -import org.scalatest.Assertions.{assertResult, withClue} - -object MarkdownUtils { - - def verifyOutput( - markdown: Path, - newOutput: MarkdownBuilder, - agent: String, - module: String): Unit = { - val formatted = newOutput.formatMarkdown() - if (System.getenv("KYUUBI_UPDATE") == "1") { - Files.write( - markdown, - formatted.asJava, - StandardOpenOption.CREATE, - StandardOpenOption.TRUNCATE_EXISTING) - } else { - val linesInFile = Files.readAllLines(markdown, StandardCharsets.UTF_8) - linesInFile.asScala.zipWithIndex.zip(formatted).foreach { case ((str1, index), str2) => - withClue(s"$markdown out of date, as line ${index + 1} is not expected." 
+ - " Please update doc with KYUUBI_UPDATE=1 build/mvn clean test" + - s" -pl $module -am -Pflink-provided,spark-provided,hive-provided" + - s" -DwildcardSuites=$agent") { - assertResult(str2)(str1) - } - } - } - } - - def line(str: String): String = { - str.stripMargin.replaceAll(EOL, "") - } - - def appendBlankLine(buffer: ArrayBuffer[String]): Unit = buffer += "" - - def appendFileContent(buffer: ArrayBuffer[String], path: Path): Unit = { - buffer += "```bash" - buffer ++= Files.readAllLines(path).asScala - buffer += "```" - } -} +import com.vladsch.flexmark.util.sequence.SequenceUtils.EOL class MarkdownBuilder { - private val buffer = new ArrayBuffer[String]() + private val buffer = new ListBuffer[String] /** * append a single line * with replacing EOL to empty string + * * @param str single line * @return */ - def line(str: String = ""): MarkdownBuilder = { - buffer += str.stripMargin.replaceAll(EOL, "") + def +=(str: String): MarkdownBuilder = { + buffer += str.stripMargin.linesIterator.mkString this } /** * append the multiline * with splitting EOL into single lines - * @param multiline multiline with default line margin "|" + * + * @param multiline multiline with line margin char + * @param marginChar margin char, default to "|" * @return */ - def lines(multiline: String): MarkdownBuilder = { - buffer ++= multiline.stripMargin.split(EOL) + def ++=(multiline: String, marginChar: Char = '|'): MarkdownBuilder = { + buffer ++= multiline.stripMargin(marginChar).linesIterator this } @@ -100,7 +58,7 @@ class MarkdownBuilder { * @return */ def licence(): MarkdownBuilder = { - lines(""" + this ++= """ | - |""") + |""" } /** @@ -126,36 +84,14 @@ class MarkdownBuilder { * @return */ def generationHint(className: String): MarkdownBuilder = { - lines(s""" + this ++= + s""" | | - |""") - } - - /** - * append file content - * @param path file path - * @return - */ - def file(path: Path): MarkdownBuilder = { - buffer ++= Files.readAllLines(path).asScala - this - } - - /** - * append file content with code block quote - * @param path path to file - * @param language language of codeblock - * @return - */ - def fileWithBlock(path: Path, language: String = "bash"): MarkdownBuilder = { - buffer += s"```$language" - file(path) - buffer += "```" - this + |""" } - def formatMarkdown(): Stream[String] = { + def toMarkdown: Stream[String] = { def createParserOptions(emulationProfile: ParserEmulationProfile): MutableDataHolder = { PegdownOptionsAdapter.flexmarkOptions(PegdownExtensions.ALL).toMutable .set(Parser.PARSER_EMULATION_PROFILE, emulationProfile) @@ -175,7 +111,7 @@ class MarkdownBuilder { val parser = Parser.builder(parserOptions).build val renderer = Formatter.builder(formatterOptions).build val document = parser.parse(buffer.mkString(EOL)) - val formattedLines = new ArrayBuffer[String](buffer.length) + val formattedLines = new ListBuffer[String] val formattedLinesAppendable = new Appendable { override def append(csq: CharSequence): Appendable = { if (csq.length() > 0) { diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/config/ConfigBuilderSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/config/ConfigBuilderSuite.scala index 4a9ade55107..78429d27c9f 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/config/ConfigBuilderSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/config/ConfigBuilderSuite.scala @@ -18,6 +18,7 @@ package org.apache.kyuubi.config import org.apache.kyuubi.KyuubiFunSuite +import org.apache.kyuubi.util.AssertionUtils._ class 
ConfigBuilderSuite extends KyuubiFunSuite { @@ -72,6 +73,33 @@ class ConfigBuilderSuite extends KyuubiFunSuite { KyuubiConf.register(sequenceConf) val kyuubiConf = KyuubiConf().set(sequenceConf.key, "kyuubi,kent") assert(kyuubiConf.get(sequenceConf) === Seq("kyuubi", "kent")) + + val stringConfUpper = ConfigBuilder("kyuubi.string.conf.upper") + .stringConf + .transformToUpperCase + .createWithDefault("Kent, Yao") + assert(stringConfUpper.key === "kyuubi.string.conf.upper") + assert(stringConfUpper.defaultVal.get === "KENT, YAO") + + val stringConfUpperSeq = ConfigBuilder("kyuubi.string.conf.upper.seq") + .stringConf + .transformToUpperCase + .toSequence() + .createWithDefault(Seq("hehe")) + assert(stringConfUpperSeq.defaultVal.get === Seq("HEHE")) + + val stringConfSet = ConfigBuilder("kyuubi.string.conf.set") + .stringConf + .toSet() + .createWithDefault(Set("hehe", "haha")) + assert(stringConfSet.defaultVal.get === Set("hehe", "haha")) + + val stringConfLower = ConfigBuilder("kyuubi.string.conf.lower") + .stringConf + .transformToLowerCase + .createWithDefault("Kent, Yao") + assert(stringConfLower.key === "kyuubi.string.conf.lower") + assert(stringConfLower.defaultVal.get === "kent, yao") } test("time config") { @@ -98,4 +126,21 @@ class ConfigBuilderSuite extends KyuubiFunSuite { val e = intercept[IllegalArgumentException](kyuubiConf.get(intConf)) assert(e.getMessage equals "'-1' in kyuubi.invalid.config is invalid. must be positive integer") } + + test("invalid config for enum") { + object TempEnum extends Enumeration { + type TempEnum = Value + val ValA, ValB = Value + } + val stringConf = ConfigBuilder("kyuubi.invalid.config.enum") + .stringConf + .checkValues(TempEnum) + .createWithDefault("ValA") + assert(stringConf.key === "kyuubi.invalid.config.enum") + assert(stringConf.defaultVal.get === "ValA") + val kyuubiConf = KyuubiConf().set(stringConf.key, "ValC") + KyuubiConf.register(stringConf) + interceptEquals[IllegalArgumentException] { kyuubiConf.get(stringConf) }( + "The value of kyuubi.invalid.config.enum should be one of ValA, ValB, but was ValC") + } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/config/KyuubiConfSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/config/KyuubiConfSuite.scala index f05e15d8a76..39e68f0ecfa 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/config/KyuubiConfSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/config/KyuubiConfSuite.scala @@ -200,4 +200,25 @@ class KyuubiConfSuite extends KyuubiFunSuite { assertResult(kSeq(1))("kyuubi.efg") assertResult(kSeq(2))("kyuubi.xyz") } + + test("KYUUBI #4843 - Support multiple kubernetes contexts and namespaces") { + val kyuubiConf = KyuubiConf(false) + kyuubiConf.set("kyuubi.kubernetes.28.master.address", "k8s://master") + kyuubiConf.set( + "kyuubi.kubernetes.28.ns1.authenticate.oauthTokenFile", + "/var/run/secrets/kubernetes.io/token.ns1") + kyuubiConf.set( + "kyuubi.kubernetes.28.ns2.authenticate.oauthTokenFile", + "/var/run/secrets/kubernetes.io/token.ns2") + + val kubernetesConf1 = kyuubiConf.getKubernetesConf(Some("28"), Some("ns1")) + assert(kubernetesConf1.get(KyuubiConf.KUBERNETES_MASTER) == Some("k8s://master")) + assert(kubernetesConf1.get(KyuubiConf.KUBERNETES_AUTHENTICATE_OAUTH_TOKEN_FILE) == + Some("/var/run/secrets/kubernetes.io/token.ns1")) + + val kubernetesConf2 = kyuubiConf.getKubernetesConf(Some("28"), Some("ns2")) + assert(kubernetesConf2.get(KyuubiConf.KUBERNETES_MASTER) == Some("k8s://master")) + 
assert(kubernetesConf2.get(KyuubiConf.KUBERNETES_AUTHENTICATE_OAUTH_TOKEN_FILE) == + Some("/var/run/secrets/kubernetes.io/token.ns2")) + } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HudiMetadataTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HudiMetadataTests.scala deleted file mode 100644 index e6870a4e385..00000000000 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/HudiMetadataTests.scala +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.kyuubi.operation - -import org.apache.kyuubi.HudiSuiteMixin -import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._ - -trait HudiMetadataTests extends HiveJDBCTestHelper with HudiSuiteMixin { - - test("get catalogs") { - withJdbcStatement() { statement => - val metaData = statement.getConnection.getMetaData - val catalogs = metaData.getCatalogs - catalogs.next() - assert(catalogs.getString(TABLE_CAT) === "spark_catalog") - assert(!catalogs.next()) - } - } - - test("get schemas") { - val dbs = Seq("db1", "db2", "db33", "db44") - val dbDflts = Seq("default", "global_temp") - - val catalog = "spark_catalog" - withDatabases(dbs: _*) { statement => - dbs.foreach(db => statement.execute(s"CREATE DATABASE IF NOT EXISTS $db")) - val metaData = statement.getConnection.getMetaData - - Seq("", "*", "%", null, ".*", "_*", "_%", ".%") foreach { pattern => - checkGetSchemas(metaData.getSchemas(catalog, pattern), dbs ++ dbDflts, catalog) - } - - Seq("db%", "db.*") foreach { pattern => - checkGetSchemas(metaData.getSchemas(catalog, pattern), dbs, catalog) - } - - Seq("db_", "db.") foreach { pattern => - checkGetSchemas(metaData.getSchemas(catalog, pattern), dbs.take(2), catalog) - } - - checkGetSchemas(metaData.getSchemas(catalog, "db1"), Seq("db1"), catalog) - checkGetSchemas(metaData.getSchemas(catalog, "db_not_exist"), Seq.empty, catalog) - } - } - - test("get tables") { - val table = "table_1_test" - val schema = "default" - val tableType = "TABLE" - - withJdbcStatement(table) { statement => - statement.execute( - s""" - | create table $table ( - | id int, - | name string, - | price double, - | ts long - | ) using $format - | options ( - | primaryKey = 'id', - | preCombineField = 'ts', - | hoodie.bootstrap.index.class = - | 'org.apache.hudi.common.bootstrap.index.NoOpBootstrapIndex' - | ) - """.stripMargin) - - val metaData = statement.getConnection.getMetaData - val rs1 = metaData.getTables(null, null, null, null) - assert(rs1.next()) - val catalogName = rs1.getString(TABLE_CAT) - assert(catalogName === "spark_catalog" || catalogName === null) - assert(rs1.getString(TABLE_SCHEM) === schema) - assert(rs1.getString(TABLE_NAME) == table) - assert(rs1.getString(TABLE_TYPE) == tableType) - 
assert(!rs1.next()) - - val rs2 = metaData.getTables(null, null, "table%", Array("TABLE")) - assert(rs2.next()) - assert(rs2.getString(TABLE_NAME) == table) - assert(!rs2.next()) - - val rs3 = metaData.getTables(null, "default", "*", Array("VIEW")) - assert(!rs3.next()) - } - } - - test("get columns type") { - val dataTypes = Seq( - "boolean", - "int", - "bigint", - "float", - "double", - "decimal(38,20)", - "decimal(10,2)", - "string", - "array", - "array", - "date", - "timestamp", - "struct<`X`: bigint, `Y`: double>", - "binary", - "struct<`X`: string>") - val cols = dataTypes.zipWithIndex.map { case (dt, idx) => s"c$idx" -> dt } - val (colNames, _) = cols.unzip - - val metadataCols = Seq( - "_hoodie_commit_time", - "_hoodie_commit_seqno", - "_hoodie_record_key", - "_hoodie_partition_path", - "_hoodie_file_name") - - val defaultPkCol = "uuid" - - val reservedCols = metadataCols :+ defaultPkCol - - val tableName = "hudi_get_col_operation" - val ddl = - s""" - |CREATE TABLE IF NOT EXISTS $catalog.$defaultSchema.$tableName ( - | $defaultPkCol string, - | ${cols.map { case (cn, dt) => cn + " " + dt }.mkString(",\n")} - |) - |USING hudi""".stripMargin - - withJdbcStatement(tableName) { statement => - statement.execute(ddl) - - val metaData = statement.getConnection.getMetaData - - Seq("%", null, ".*", "c.*") foreach { columnPattern => - val rowSet = metaData.getColumns(catalog, defaultSchema, tableName, columnPattern) - - import java.sql.Types._ - val expectedJavaTypes = Seq( - BOOLEAN, - INTEGER, - BIGINT, - FLOAT, - DOUBLE, - DECIMAL, - DECIMAL, - VARCHAR, - ARRAY, - ARRAY, - DATE, - TIMESTAMP, - STRUCT, - BINARY, - STRUCT) - - var pos = 0 - while (rowSet.next()) { - assert(rowSet.getString(TABLE_CAT) === catalog) - assert(rowSet.getString(TABLE_SCHEM) === defaultSchema) - assert(rowSet.getString(TABLE_NAME) === tableName) - rowSet.getString(COLUMN_NAME) match { - case name if reservedCols.contains(name) => - assert(rowSet.getInt(DATA_TYPE) === VARCHAR) - assert(rowSet.getString(TYPE_NAME) equalsIgnoreCase "STRING") - case _ => - assert(rowSet.getString(COLUMN_NAME) === colNames(pos)) - assert(rowSet.getInt(DATA_TYPE) === expectedJavaTypes(pos)) - assert(rowSet.getString(TYPE_NAME) equalsIgnoreCase dataTypes(pos)) - pos += 1 - } - } - - assert(pos === dataTypes.size, "all columns should have been verified") - } - - val rowSet = metaData.getColumns(catalog, "*", "not_exist", "not_exist") - assert(!rowSet.next()) - } - } -} diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/JDBCTestHelper.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/JDBCTestHelper.scala index 663fd181644..e7802f2fe55 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/JDBCTestHelper.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/JDBCTestHelper.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.operation -import java.sql.{DriverManager, SQLException, Statement} +import java.sql.{DriverManager, PreparedStatement, SQLException, Statement} import java.util.Locale import org.apache.kyuubi.KyuubiFunSuite @@ -53,6 +53,7 @@ trait JDBCTestHelper extends KyuubiFunSuite { def withMultipleConnectionJdbcStatement( tableNames: String*)(fs: (Statement => Unit)*): Unit = { + info(s"Create JDBC connection using: $jdbcUrlWithConf") val connections = fs.map { _ => DriverManager.getConnection(jdbcUrlWithConf, user, password) } val statements = connections.map(_.createStatement()) @@ -75,6 +76,31 @@ trait JDBCTestHelper extends KyuubiFunSuite { } } + def 
withMultipleConnectionJdbcPrepareStatement( + sql: String, + tableNames: String*)(fs: (PreparedStatement => Unit)*): Unit = { + val connections = fs.map { _ => DriverManager.getConnection(jdbcUrlWithConf, user, password) } + val statements = connections.map(_.prepareStatement(sql)) + + try { + statements.zip(fs).foreach { case (s, f) => f(s) } + } finally { + tableNames.foreach { name => + if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) { + statements.head.execute(s"DROP VIEW IF EXISTS $name") + } else { + statements.head.execute(s"DROP TABLE IF EXISTS $name") + } + } + info("Closing statements") + statements.foreach(_.close()) + info("Closed statements") + info("Closing connections") + connections.foreach(_.close()) + info("Closed connections") + } + } + def withDatabases(dbNames: String*)(fs: (Statement => Unit)*): Unit = { val connections = fs.map { _ => DriverManager.getConnection(jdbcUrlWithConf, user, password) } val statements = connections.map(_.createStatement()) @@ -97,4 +123,10 @@ trait JDBCTestHelper extends KyuubiFunSuite { def withJdbcStatement(tableNames: String*)(f: Statement => Unit): Unit = { withMultipleConnectionJdbcStatement(tableNames: _*)(f) } + + def withJdbcPrepareStatement( + sql: String, + tableNames: String*)(f: PreparedStatement => Unit): Unit = { + withMultipleConnectionJdbcPrepareStatement(sql, tableNames: _*)(f) + } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperation.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperation.scala index 2d1166525fd..c369e00efd8 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperation.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperation.scala @@ -21,7 +21,7 @@ import java.nio.ByteBuffer import scala.collection.JavaConverters._ -import org.apache.hive.service.rpc.thrift.{TColumn, TColumnDesc, TGetResultSetMetadataResp, TPrimitiveTypeEntry, TRowSet, TStringColumn, TTableSchema, TTypeDesc, TTypeEntry, TTypeId} +import org.apache.hive.service.rpc.thrift.{TColumn, TColumnDesc, TFetchResultsResp, TGetResultSetMetadataResp, TPrimitiveTypeEntry, TStringColumn, TTableSchema, TTypeDesc, TTypeEntry, TTypeId} import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation @@ -76,11 +76,16 @@ class NoopOperation(session: Session, shouldFail: Boolean = false) resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { val col = TColumn.stringVal(new TStringColumn(Seq(opType).asJava, ByteBuffer.allocate(0))) val tRowSet = ThriftUtils.newEmptyRowSet tRowSet.addToColumns(col) - tRowSet + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(tRowSet) + resp.setHasMoreRows(false) + resp } override def shouldRunAsync: Boolean = false diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperationManager.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperationManager.scala index 455e5d4d2df..352aae905ed 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperationManager.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/NoopOperationManager.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.operation import java.nio.ByteBuffer import java.util -import org.apache.hive.service.rpc.thrift.{TColumn, TRow, TRowSet, TStringColumn} +import 
org.apache.hive.service.rpc.thrift.{TColumn, TFetchResultsResp, TRow, TRowSet, TStatus, TStatusCode, TStringColumn} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.session.Session @@ -136,13 +136,16 @@ class NoopOperationManager extends OperationManager("noop") { override def getOperationLogRowSet( opHandle: OperationHandle, order: FetchOrientation, - maxRows: Int): TRowSet = { + maxRows: Int): TFetchResultsResp = { val logs = new util.ArrayList[String]() logs.add("test") val tColumn = TColumn.stringVal(new TStringColumn(logs, ByteBuffer.allocate(0))) val tRow = new TRowSet(0, new util.ArrayList[TRow](logs.size())) tRow.addToColumns(tColumn) - tRow + val resp = new TFetchResultsResp(new TStatus(TStatusCode.SUCCESS_STATUS)) + resp.setResults(tRow) + resp.setHasMoreRows(false) + resp } override def getQueryId(operation: Operation): String = { diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/OperationStateSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/OperationStateSuite.scala index d35ea246fde..86c7e5e80a1 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/OperationStateSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/OperationStateSuite.scala @@ -17,11 +17,13 @@ package org.apache.kyuubi.operation -import org.apache.hive.service.rpc.thrift.TOperationState +import org.apache.hive.service.rpc.thrift.{TOperationState, TProtocolVersion} import org.apache.hive.service.rpc.thrift.TOperationState._ import org.apache.kyuubi.{KyuubiFunSuite, KyuubiSQLException} +import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.operation.OperationState._ +import org.apache.kyuubi.session.NoopSessionManager class OperationStateSuite extends KyuubiFunSuite { test("toTOperationState") { @@ -79,4 +81,27 @@ class OperationStateSuite extends KyuubiFunSuite { assert(!OperationState.isTerminal(state)) } } + + test("kyuubi-5036 operation close should set completeTime") { + val sessionManager = new NoopSessionManager + sessionManager.initialize(KyuubiConf()) + val sHandle = sessionManager.openSession( + TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11, + "kyuubi", + "passwd", + "localhost", + Map.empty) + val session = sessionManager.getSession(sHandle) + + val operation = new NoopOperation(session) + assert(operation.getStatus.completed == 0) + + operation.close() + val afterClose1 = operation.getStatus + assert(afterClose1.state == OperationState.CLOSED) + assert(afterClose1.completed != 0) + Thread.sleep(1000) + val afterClose2 = operation.getStatus + assert(afterClose1.completed == afterClose2.completed) + } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkDataTypeTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkDataTypeTests.scala index f0dd3e72374..2709bc861f5 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkDataTypeTests.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkDataTypeTests.scala @@ -245,7 +245,7 @@ trait SparkDataTypeTests extends HiveJDBCTestHelper with SparkVersionUtil { assert(resultSet.next()) val result = resultSet.getString("col") val metaData = resultSet.getMetaData - if (SPARK_ENGINE_RUNTIME_VERSION < "3.2") { + if (SPARK_ENGINE_RUNTIME_VERSION <= "3.1") { // for spark 3.1 and backwards assert(result === kv._2._2) assert(metaData.getPrecision(1) === Int.MaxValue) @@ -276,7 +276,7 @@ trait SparkDataTypeTests extends HiveJDBCTestHelper with 
SparkVersionUtil { assert(resultSet.next()) val result = resultSet.getString("col") val metaData = resultSet.getMetaData - if (SPARK_ENGINE_RUNTIME_VERSION < "3.2") { + if (SPARK_ENGINE_RUNTIME_VERSION <= "3.1") { // for spark 3.1 and backwards assert(result === kv._2._2) assert(metaData.getPrecision(1) === Int.MaxValue) diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkQueryTests.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkQueryTests.scala index ff8b124813c..20d3f6fad5b 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkQueryTests.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/SparkQueryTests.scala @@ -218,6 +218,35 @@ trait SparkQueryTests extends SparkDataTypeTests with HiveJDBCTestHelper { } } + test("kyuubi #3444: Plan only mode with lineage mode") { + + val ddl = "create table if not exists t0(a int) using parquet" + val dql = "select * from t0" + withSessionConf()(Map(KyuubiConf.OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name))() { + withJdbcStatement("t0") { statement => + statement.execute(ddl) + statement.execute("SET kyuubi.operation.plan.only.mode=lineage") + val lineageParserClassName = "org.apache.kyuubi.plugin.lineage.LineageParserProvider" + + try { + val resultSet = statement.executeQuery(dql) + assert(resultSet.next()) + val actualResult = + """ + |{"inputTables":["spark_catalog.default.t0"],"outputTables":[], + |"columnLineage":[{"column":"a","originalColumns":["spark_catalog.default.t0.a"]}]} + |""".stripMargin.split("\n").mkString("") + assert(resultSet.getString(1) == actualResult) + } catch { + case e: Throwable => + assert(e.getMessage.contains(s"'$lineageParserClassName' not found")) + } finally { + statement.execute("SET kyuubi.operation.plan.only.mode=none") + } + } + } + } + test("execute simple scala code") { withJdbcStatement() { statement => statement.execute("SET kyuubi.operation.language=scala") @@ -383,7 +412,7 @@ trait SparkQueryTests extends SparkDataTypeTests with HiveJDBCTestHelper { rs.next() // scalastyle:off println(rs.getString(1)) - // scalastyle:on + // scalastyle:on } val code1 = s"""spark.sql("add jar " + jarPath)""" diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala index fe3cbc7fc75..570a8159bcf 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala @@ -27,7 +27,7 @@ import org.apache.hive.service.rpc.thrift.{TProtocolVersion, TRowSet} import org.apache.kyuubi.{KyuubiFunSuite, KyuubiSQLException, Utils} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.operation.OperationHandle +import org.apache.kyuubi.operation.{FetchOrientation, OperationHandle} import org.apache.kyuubi.session.NoopSessionManager import org.apache.kyuubi.util.ThriftUtils @@ -237,6 +237,47 @@ class OperationLogSuite extends KyuubiFunSuite { } } + test("test fetchOrientation read") { + val file = Utils.createTempDir().resolve("f") + val file2 = Utils.createTempDir().resolve("extra") + val writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8) + val writer2 = Files.newBufferedWriter(file2, StandardCharsets.UTF_8) + try { + 0.until(10).foreach(x => writer.write(s"$x\n")) + writer.flush() + writer.close() + 10.until(20).foreach(x => writer2.write(s"$x\n")) + writer2.flush() + writer2.close() + + 
def compareResult(rows: TRowSet, expected: Seq[String]): Unit = { + val res = rows.getColumns.get(0).getStringVal.getValues.asScala + assert(res.size == expected.size) + res.zip(expected).foreach { case (l, r) => + assert(l == r) + } + } + + val log = new OperationLog(file) + log.addExtraLog(file2) + // The operation log file is created externally and should be initialized actively. + log.initOperationLogIfNecessary() + + compareResult( + log.read(FetchOrientation.FETCH_NEXT, 10), + Seq("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")) + compareResult(log.read(FetchOrientation.FETCH_NEXT, 5), Seq("10", "11", "12", "13", "14")) + compareResult(log.read(FetchOrientation.FETCH_FIRST, 5), Seq("0", "1", "2", "3", "4")) + compareResult( + log.read(FetchOrientation.FETCH_NEXT, 10), + Seq("5", "6", "7", "8", "9", "10", "11", "12", "13", "14")) + compareResult(log.read(FetchOrientation.FETCH_NEXT, 10), Seq("15", "16", "17", "18", "19")) + } finally { + Utils.deleteDirectoryRecursively(file.toFile) + Utils.deleteDirectoryRecursively(file2.toFile) + } + } + test("[KYUUBI #3511] Reading an uninitialized log should return empty rowSet") { val sessionManager = new NoopSessionManager sessionManager.initialize(KyuubiConf()) @@ -297,4 +338,53 @@ class OperationLogSuite extends KyuubiFunSuite { Utils.deleteDirectoryRecursively(extraFile.toFile) } } + + test("Closing the unwritten operation log should not throw an exception") { + val sessionManager = new NoopSessionManager + sessionManager.initialize(KyuubiConf()) + val sHandle = sessionManager.openSession( + TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10, + "kyuubi", + "passwd", + "localhost", + Map.empty) + val session = sessionManager.getSession(sHandle) + OperationLog.createOperationLogRootDirectory(session) + val oHandle = OperationHandle() + + val log = OperationLog.createOperationLog(session, oHandle) + val tRowSet = log.read(1) + assert(tRowSet == ThriftUtils.newEmptyRowSet) + // close the operation log without writing + log.close() + session.close() + } + + test("test operationLog multiple read with missing line ") { + val file = Utils.createTempDir().resolve("f") + val writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8) + try { + 0.until(10).foreach(x => writer.write(s"$x\n")) + writer.flush() + writer.close() + + val log = new OperationLog(file) + // The operation log file is created externally and should be initialized actively. 
+ log.initOperationLogIfNecessary() + + def compareResult(rows: TRowSet, expected: Seq[String]): Unit = { + val res = rows.getColumns.get(0).getStringVal.getValues.asScala + assert(res.size == expected.size) + res.zip(expected).foreach { case (l, r) => + assert(l == r) + } + } + compareResult(log.read(2), Seq("0", "1")) + compareResult(log.read(3), Seq("2", "3", "4")) + compareResult(log.read(10), Seq("5", "6", "7", "8", "9")) + } finally { + Utils.deleteDirectoryRecursively(file.toFile) + } + } + } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/TFrontendServiceSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/TFrontendServiceSuite.scala index 28442fe62ec..444bfe2cc3a 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/TFrontendServiceSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/TFrontendServiceSuite.scala @@ -40,8 +40,8 @@ class TFrontendServiceSuite extends KyuubiFunSuite { .set(KyuubiConf.SESSION_CHECK_INTERVAL, Duration.ofSeconds(5).toMillis) .set(KyuubiConf.SESSION_IDLE_TIMEOUT, Duration.ofSeconds(5).toMillis) .set(KyuubiConf.OPERATION_IDLE_TIMEOUT, Duration.ofSeconds(20).toMillis) - .set(KyuubiConf.SESSION_CONF_RESTRICT_LIST, Seq("spark.*")) - .set(KyuubiConf.SESSION_CONF_IGNORE_LIST, Seq("session.engine.*")) + .set(KyuubiConf.SESSION_CONF_RESTRICT_LIST, Set("spark.*")) + .set(KyuubiConf.SESSION_CONF_IGNORE_LIST, Set("session.engine.*")) private def withSessionHandle(f: (TCLIService.Iface, TSessionHandle) => Unit): Unit = { TClientTestUtils.withSessionHandle(server.frontendServices.head.connectionUrl, Map.empty)(f) @@ -115,6 +115,33 @@ class TFrontendServiceSuite extends KyuubiFunSuite { assert(service2.connectionUrl.split("\\.")(0).toInt > 0) } + test("advertised host") { + + def newService: TBinaryFrontendService = { + new TBinaryFrontendService("DummyThriftBinaryFrontendService") { + override val serverable: Serverable = new NoopTBinaryFrontendServer + override val discoveryService: Option[Service] = None + } + } + + val conf = new KyuubiConf() + .set(FRONTEND_THRIFT_BINARY_BIND_HOST.key, "localhost") + .set(FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + .set(FRONTEND_ADVERTISED_HOST, "dummy.host") + val service = newService + + service.initialize(conf) + assert(service.connectionUrl.startsWith("dummy.host")) + + val service2 = newService + val conf2 = KyuubiConf() + .set(FRONTEND_THRIFT_BINARY_BIND_HOST.key, "localhost") + .set(FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + + service2.initialize(conf2) + assert(service2.connectionUrl.startsWith("localhost")) + } + test("open session") { TClientTestUtils.withThriftClient(server.frontendServices.head) { client => @@ -124,7 +151,7 @@ class TFrontendServiceSuite extends KyuubiFunSuite { val resp = client.OpenSession(req) val handle = resp.getSessionHandle assert(handle != null) - assert(resp.getStatus.getStatusCode == TStatusCode.SUCCESS_STATUS) + assert(resp.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) req.setConfiguration(Map("kyuubi.test.should.fail" -> "true").asJava) val resp1 = client.OpenSession(req) @@ -514,39 +541,37 @@ class TFrontendServiceSuite extends KyuubiFunSuite { test("close expired operations") { withSessionHandle { (client, handle) => - val req = new TCancelOperationReq() - val req1 = new TGetSchemasReq(handle) - val resp1 = client.GetSchemas(req1) + val req = new TGetSchemasReq(handle) + val resp = client.GetSchemas(req) val sessionManager = server.backendService.sessionManager val session = sessionManager 
.getSession(SessionHandle(handle)) .asInstanceOf[AbstractSession] var lastAccessTime = session.lastAccessTime - assert(sessionManager.getOpenSessionCount == 1) + assert(sessionManager.getOpenSessionCount === 1) assert(session.lastIdleTime > 0) - resp1.getOperationHandle - req.setOperationHandle(resp1.getOperationHandle) - val resp2 = client.CancelOperation(req) - assert(resp2.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) - assert(sessionManager.getOpenSessionCount == 1) - assert(session.lastIdleTime == 0) - assert(lastAccessTime < session.lastAccessTime) + val cancelOpReq = new TCancelOperationReq(resp.getOperationHandle) + val cancelOpResp = client.CancelOperation(cancelOpReq) + assert(cancelOpResp.getStatus.getStatusCode === TStatusCode.SUCCESS_STATUS) + assert(sessionManager.getOpenSessionCount === 1) + assert(session.lastIdleTime === 0) + eventually(timeout(Span(60, Seconds)), interval(Span(1, Seconds))) { + assert(lastAccessTime < session.lastAccessTime) + } lastAccessTime = session.lastAccessTime eventually(timeout(Span(60, Seconds)), interval(Span(1, Seconds))) { - assert(session.lastIdleTime > lastAccessTime) + assert(lastAccessTime <= session.lastIdleTime) } - info("operation is terminated") - assert(lastAccessTime == session.lastAccessTime) - assert(sessionManager.getOpenSessionCount == 1) eventually(timeout(Span(60, Seconds)), interval(Span(1, Seconds))) { assert(session.lastAccessTime > lastAccessTime) } - assert(sessionManager.getOpenSessionCount == 0) + info("session is terminated") + assert(sessionManager.getOpenSessionCount === 0) } } @@ -562,7 +587,7 @@ class TFrontendServiceSuite extends KyuubiFunSuite { Map( "session.engine.spark.main.resource" -> "org.apahce.kyuubi.test", "session.check.interval" -> "10000")) - assert(conf.size == 1) - assert(conf("session.check.interval") == "10000") + assert(conf.size === 1) + assert(conf("session.check.interval") === "10000") } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/JdbcAuthenticationProviderImplSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/JdbcAuthenticationProviderImplSuite.scala index dcbc62dfa3f..4642eb910e6 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/JdbcAuthenticationProviderImplSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/JdbcAuthenticationProviderImplSuite.scala @@ -17,32 +17,27 @@ package org.apache.kyuubi.service.authentication -import java.sql.DriverManager import java.util.Properties import javax.security.sasl.AuthenticationException import javax.sql.DataSource import com.zaxxer.hikari.util.DriverDataSource -import org.apache.kyuubi.{KyuubiFunSuite, Utils} +import org.apache.kyuubi.KyuubiFunSuite import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.util.JdbcUtils class JdbcAuthenticationProviderImplSuite extends KyuubiFunSuite { - protected val dbUser: String = "bowenliang123" - protected val dbPasswd: String = "bowenliang123@kyuubi" - protected val authDbName: String = "auth_db" - protected val dbUrl: String = s"jdbc:derby:memory:$authDbName" - protected val jdbcUrl: String = s"$dbUrl;create=true" - private val authDbDriverClz = "org.apache.derby.jdbc.AutoloadedDriver" + protected val jdbcUrl: String = "jdbc:sqlite:file:test_auth.db" + private val authDbDriverClz = "org.sqlite.JDBC" implicit private val ds: DataSource = new DriverDataSource( jdbcUrl, authDbDriverClz, new Properties, - 
dbUser, - dbPasswd) + null, + null) protected val authUser: String = "kyuubiuser" protected val authPasswd: String = "kyuubiuuserpassword" @@ -50,15 +45,13 @@ class JdbcAuthenticationProviderImplSuite extends KyuubiFunSuite { protected val conf: KyuubiConf = new KyuubiConf() .set(AUTHENTICATION_JDBC_DRIVER, authDbDriverClz) .set(AUTHENTICATION_JDBC_URL, jdbcUrl) - .set(AUTHENTICATION_JDBC_USER, dbUser) - .set(AUTHENTICATION_JDBC_PASSWORD, dbPasswd) .set( AUTHENTICATION_JDBC_QUERY, "SELECT 1 FROM user_auth WHERE username=${user} and passwd=${password}") override def beforeAll(): Unit = { + JdbcUtils.execute("DROP TABLE IF EXISTS user_auth")() // init db - JdbcUtils.execute(s"CREATE SCHEMA $dbUser")() JdbcUtils.execute( """CREATE TABLE user_auth ( | username VARCHAR(64) NOT NULL PRIMARY KEY, @@ -72,15 +65,6 @@ class JdbcAuthenticationProviderImplSuite extends KyuubiFunSuite { super.beforeAll() } - override def afterAll(): Unit = { - super.afterAll() - - // cleanup db - Utils.tryLogNonFatalError { - DriverManager.getConnection(s"$dbUrl;shutdown=true") - } - } - test("authenticate tests") { val providerImpl = new JdbcAuthenticationProviderImpl(conf) providerImpl.authenticate(authUser, authPasswd) @@ -144,6 +128,6 @@ class JdbcAuthenticationProviderImplSuite extends KyuubiFunSuite { val e12 = intercept[AuthenticationException] { new JdbcAuthenticationProviderImpl(_conf).authenticate(authUser, authPasswd) } - assert(e12.getCause.getMessage.contains("Column 'UNKNOWN_COLUMN' is either not in any table")) + assert(e12.getCause.getMessage.contains("no such column: unknown_column")) } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactorySuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactorySuite.scala index 19b89b47e41..316c9b2dfdf 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactorySuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/KyuubiAuthenticationFactorySuite.scala @@ -25,6 +25,7 @@ import org.apache.thrift.transport.TSaslServerTransport import org.apache.kyuubi.{KyuubiFunSuite, KyuubiSQLException} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.service.authentication.PlainSASLServer.SaslPlainProvider +import org.apache.kyuubi.util.AssertionUtils._ import org.apache.kyuubi.util.KyuubiHadoopUtils class KyuubiAuthenticationFactorySuite extends KyuubiFunSuite { @@ -56,21 +57,21 @@ class KyuubiAuthenticationFactorySuite extends KyuubiFunSuite { } test("AuthType Other") { - val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("INVALID")) - val e = intercept[IllegalArgumentException](new KyuubiAuthenticationFactory(conf)) - assert(e.getMessage contains "the authentication type should be one or more of" + - " NOSASL,NONE,LDAP,JDBC,KERBEROS,CUSTOM") + val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("INVALID")) + interceptEquals[IllegalArgumentException] { new KyuubiAuthenticationFactory(conf) }( + "The value of kyuubi.authentication should be one of" + + " NOSASL, NONE, LDAP, JDBC, KERBEROS, CUSTOM, but was INVALID") } test("AuthType LDAP") { - val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("LDAP")) + val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("LDAP")) val authFactory = new KyuubiAuthenticationFactory(conf) authFactory.getTTransportFactory 
assert(Security.getProviders.exists(_.isInstanceOf[SaslPlainProvider])) } test("AuthType KERBEROS w/o keytab/principal") { - val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("KERBEROS")) + val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("KERBEROS")) val factory = new KyuubiAuthenticationFactory(conf) val e = intercept[LoginException](factory.getTTransportFactory) @@ -78,11 +79,11 @@ class KyuubiAuthenticationFactorySuite extends KyuubiFunSuite { } test("AuthType is NOSASL if only NOSASL is specified") { - val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("NOSASL")) + val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("NOSASL")) var factory = new KyuubiAuthenticationFactory(conf) !factory.getTTransportFactory.isInstanceOf[TSaslServerTransport.Factory] - conf.set(KyuubiConf.AUTHENTICATION_METHOD, Seq("NOSASL", "NONE")) + conf.set(KyuubiConf.AUTHENTICATION_METHOD, Set("NOSASL", "NONE")) factory = new KyuubiAuthenticationFactory(conf) factory.getTTransportFactory.isInstanceOf[TSaslServerTransport.Factory] } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImplSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImplSuite.scala index 718fc6f6ebd..f10bf7ce2df 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImplSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/LdapAuthenticationProviderImplSuite.scala @@ -27,6 +27,7 @@ import org.scalatestplus.mockito.MockitoSugar.mock import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.service.authentication.ldap.{DirSearch, DirSearchFactory, LdapSearchFactory} +import org.apache.kyuubi.service.authentication.ldap.LdapUtils.getUserName class LdapAuthenticationProviderImplSuite extends WithLdapServer { @@ -46,7 +47,7 @@ class LdapAuthenticationProviderImplSuite extends WithLdapServer { } test("authenticateGivenBlankOrNullPassword") { - Seq("", "\0", null).foreach { pwd => + Seq("", "\u0000", null).foreach { pwd => auth = new LdapAuthenticationProviderImpl(conf, new LdapSearchFactory) val thrown = intercept[AuthenticationException] { auth.authenticate("user", pwd) @@ -311,6 +312,32 @@ class LdapAuthenticationProviderImplSuite extends WithLdapServer { verify(search, times(1)).findUserDn(mockEq(authUser)) } + test("AuthenticateWithBindDomainUserPasses") { + val bindUser = "cn=BindUser,ou=Users,ou=branch1,dc=mycorp,dc=com" + val bindPass = "Blah" + val authFullUser = "cn=user1,ou=Users,ou=branch1,dc=mycorp,dc=com" + val authUser = "user1@mydomain.com" + val authPass = "Blah2" + conf.set(AUTHENTICATION_LDAP_BIND_USER, bindUser) + conf.set(AUTHENTICATION_LDAP_BIND_PASSWORD, bindPass) + + val username = getUserName(authUser) + when(search.findUserDn(mockEq(username))).thenReturn(authFullUser) + + auth = new LdapAuthenticationProviderImpl(conf, factory) + auth.authenticate(authUser, authPass) + + verify(factory, times(1)).getInstance( + isA(classOf[KyuubiConf]), + mockEq(bindUser), + mockEq(bindPass)) + verify(factory, times(1)).getInstance( + isA(classOf[KyuubiConf]), + mockEq(authFullUser), + mockEq(authPass)) + verify(search, times(1)).findUserDn(mockEq(username)) + } + test("AuthenticateWithBindUserFailsOnAuthentication") { val bindUser = "cn=BindUser,ou=Users,ou=branch1,dc=mycorp,dc=com" val bindPass = "Blah" diff --git 
a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLHelperSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLHelperSuite.scala index 94a61f693eb..d4290a2c6dd 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLHelperSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLHelperSuite.scala @@ -23,9 +23,9 @@ import org.apache.thrift.transport.{TSaslServerTransport, TSocket} import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiFunSuite} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.SemanticVersion import org.apache.kyuubi.service.{NoopTBinaryFrontendServer, TBinaryFrontendService} import org.apache.kyuubi.service.authentication.PlainSASLServer.SaslPlainProvider +import org.apache.kyuubi.util.SemanticVersion class PlainSASLHelperSuite extends KyuubiFunSuite { @@ -62,10 +62,6 @@ class PlainSASLHelperSuite extends KyuubiFunSuite { val saslPlainProvider = new SaslPlainProvider() assert(saslPlainProvider.containsKey("SaslServerFactory.PLAIN")) assert(saslPlainProvider.getName === "KyuubiSaslPlain") - val version: Double = { - val ver = SemanticVersion(KYUUBI_VERSION) - ver.majorVersion + ver.minorVersion.toDouble / 10 - } - assert(saslPlainProvider.getVersion === version) + assertResult(saslPlainProvider.getVersion)(SemanticVersion(KYUUBI_VERSION).toDouble) } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLServerSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLServerSuite.scala index 78fe3ef7a04..a7f4b953529 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLServerSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/PlainSASLServerSuite.scala @@ -79,9 +79,7 @@ class PlainSASLServerSuite extends KyuubiFunSuite { "NONE", "KYUUBI", map, - new CallbackHandler { - override def handle(callbacks: Array[Callback]): Unit = {} - }) + _ => {}) val e6 = intercept[SaslException](server2.evaluateResponse(res4.map(_.toByte))) assert(e6.getMessage === "Error validating the login") assert(e6.getCause.getMessage === "Authentication failed") diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/SaslQOPSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/SaslQOPSuite.scala index c48f12aa723..6cf2793d244 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/SaslQOPSuite.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/SaslQOPSuite.scala @@ -34,7 +34,7 @@ class SaslQOPSuite extends KyuubiFunSuite { val e = intercept[IllegalArgumentException](conf.get(SASL_QOP)) assert(e.getMessage === "The value of kyuubi.authentication.sasl.qop should be one of" + - " auth, auth-conf, auth-int, but was abc") + " auth, auth-int, auth-conf, but was abc") } } diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/ldap/LdapAuthenticationTestCase.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/ldap/LdapAuthenticationTestCase.scala index e8b92ebc0ec..a06eba068dd 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/ldap/LdapAuthenticationTestCase.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/service/authentication/ldap/LdapAuthenticationTestCase.scala @@ -21,7 +21,7 @@ import 
javax.security.sasl.AuthenticationException import scala.collection.mutable -import org.scalatest.Assertions.{fail, intercept} +import org.scalatest.Assertions._ import org.apache.kyuubi.config.{ConfigEntry, KyuubiConf} import org.apache.kyuubi.service.authentication.LdapAuthenticationProviderImpl diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/util/ClassUtilsSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/util/ClassUtilsSuite.scala new file mode 100644 index 00000000000..cda638b0dce --- /dev/null +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/util/ClassUtilsSuite.scala @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.util + +import org.apache.kyuubi.KyuubiFunSuite +import org.apache.kyuubi.config.KyuubiConf + +class ClassUtilsSuite extends KyuubiFunSuite { + + private val _conf = KyuubiConf() + + test("create instance with zero-arg arg") { + val instance = ClassUtils.createInstance[SomeProvider]( + "org.apache.kyuubi.util.ProviderA", + classOf[SomeProvider], + _conf) + + assert(instance != null) + assert(instance.isInstanceOf[SomeProvider]) + assert(instance.isInstanceOf[ProviderA]) + } + + test("create instance with kyuubi conf") { + val instance = ClassUtils.createInstance[SomeProvider]( + "org.apache.kyuubi.util.ProviderB", + classOf[SomeProvider], + _conf) + assert(instance != null) + assert(instance.isInstanceOf[SomeProvider]) + assert(instance.isInstanceOf[ProviderB]) + assert(instance.asInstanceOf[ProviderB].getConf != null) + } + + test("create instance of inherited class with kyuubi conf") { + val instance = ClassUtils.createInstance[SomeProvider]( + "org.apache.kyuubi.util.ProviderC", + classOf[SomeProvider], + _conf) + assert(instance != null) + assert(instance.isInstanceOf[SomeProvider]) + assert(instance.isInstanceOf[ProviderB]) + assert(instance.isInstanceOf[ProviderC]) + assert(instance.asInstanceOf[ProviderC].getConf != null) + } + +} + +trait SomeProvider {} + +class ProviderA extends SomeProvider {} + +class ProviderB(conf: KyuubiConf) extends SomeProvider { + def getConf: KyuubiConf = conf +} + +class ProviderC(conf: KyuubiConf) extends ProviderB(conf) {} diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/util/SparkVersionUtil.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/util/SparkVersionUtil.scala index 785015cc377..ece9d53aa0a 100644 --- a/kyuubi-common/src/test/scala/org/apache/kyuubi/util/SparkVersionUtil.scala +++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/util/SparkVersionUtil.scala @@ -17,15 +17,12 @@ package org.apache.kyuubi.util -import org.apache.kyuubi.engine.SemanticVersion import org.apache.kyuubi.operation.HiveJDBCTestHelper trait SparkVersionUtil { this: HiveJDBCTestHelper => - protected lazy val 
SPARK_ENGINE_RUNTIME_VERSION = sparkEngineMajorMinorVersion - - def sparkEngineMajorMinorVersion: SemanticVersion = { + protected lazy val SPARK_ENGINE_RUNTIME_VERSION: SemanticVersion = { var sparkRuntimeVer = "" withJdbcStatement() { stmt => val result = stmt.executeQuery("SELECT version()") diff --git a/kyuubi-ctl/pom.xml b/kyuubi-ctl/pom.xml index eb4060ffdd5..c453cd3af95 100644 --- a/kyuubi-ctl/pom.xml +++ b/kyuubi-ctl/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-ctl_2.12 + kyuubi-ctl_${scala.binary.version} jar Kyuubi Project Control https://kyuubi.apache.org/ @@ -48,6 +48,11 @@ ${project.version}
    + + org.apache.kyuubi + ${kyuubi-shaded-zookeeper.artifacts} + + org.apache.hadoop hadoop-client-api @@ -60,16 +65,6 @@ provided - - org.apache.curator - curator-framework - - - - org.apache.curator - curator-recipes - - com.github.scopt scopt_${scala.binary.version} @@ -86,11 +81,6 @@ ${snakeyaml.version} - - org.apache.zookeeper - zookeeper - - org.apache.kyuubi kyuubi-common_${scala.binary.version} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/RestClientFactory.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/RestClientFactory.scala index bbaa5f6683f..d971eec13aa 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/RestClientFactory.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/RestClientFactory.scala @@ -18,6 +18,8 @@ package org.apache.kyuubi.ctl import java.util.{Map => JMap} +import scala.collection.JavaConverters._ + import org.apache.commons.lang3.StringUtils import org.apache.kyuubi.KyuubiException @@ -45,7 +47,9 @@ object RestClientFactory { kyuubiRestClient: KyuubiRestClient, kyuubiInstance: String)(f: KyuubiRestClient => Unit): Unit = { val kyuubiInstanceRestClient = kyuubiRestClient.clone() - kyuubiInstanceRestClient.setHostUrls(s"http://${kyuubiInstance}") + val hostUrls = Option(kyuubiInstance).map(instance => s"http://$instance").toSeq ++ + kyuubiRestClient.getHostUrls.asScala + kyuubiInstanceRestClient.setHostUrls(hostUrls.asJava) try { f(kyuubiInstanceRestClient) } finally { diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/AdminControlCliArguments.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/AdminControlCliArguments.scala index 4bc1e131757..e015525b3aa 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/AdminControlCliArguments.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/AdminControlCliArguments.scala @@ -22,7 +22,7 @@ import scopt.OParser import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.ctl.cmd.Command import org.apache.kyuubi.ctl.cmd.delete.AdminDeleteEngineCommand -import org.apache.kyuubi.ctl.cmd.list.AdminListEngineCommand +import org.apache.kyuubi.ctl.cmd.list.{AdminListEngineCommand, AdminListServerCommand} import org.apache.kyuubi.ctl.cmd.refresh.RefreshConfigCommand import org.apache.kyuubi.ctl.opt.{AdminCommandLine, CliConfig, ControlAction, ControlObject} @@ -37,6 +37,7 @@ class AdminControlCliArguments(args: Seq[String], env: Map[String, String] = sys cliConfig.action match { case ControlAction.LIST => cliConfig.resource match { case ControlObject.ENGINE => new AdminListEngineCommand(cliConfig) + case ControlObject.SERVER => new AdminListServerCommand(cliConfig) case _ => throw new KyuubiException(s"Invalid resource: ${cliConfig.resource}") } case ControlAction.DELETE => cliConfig.resource match { @@ -60,6 +61,12 @@ class AdminControlCliArguments(args: Seq[String], env: Map[String, String] = sys | type ${cliConfig.engineOpts.engineType} | sharelevel ${cliConfig.engineOpts.engineShareLevel} | sharesubdomain ${cliConfig.engineOpts.engineSubdomain} + | all ${cliConfig.engineOpts.all} + """.stripMargin + case ControlObject.SERVER => + s"""Parsed arguments: + | action ${cliConfig.action} + | resource ${cliConfig.resource} """.stripMargin case ControlObject.CONFIG => s"""Parsed arguments: diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/ControlCliArguments.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/ControlCliArguments.scala index 41d53b568b8..10bb992969a 100644 --- 
a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/ControlCliArguments.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cli/ControlCliArguments.scala @@ -33,9 +33,9 @@ import org.apache.kyuubi.ctl.opt.{CliConfig, CommandLine, ControlAction, Control class ControlCliArguments(args: Seq[String], env: Map[String, String] = sys.env) extends ControlCliArgumentsParser with Logging { - var cliConfig: CliConfig = null + var cliConfig: CliConfig = _ - var command: Command[_] = null + var command: Command[_] = _ // Set parameters from command line arguments parse(args) @@ -112,6 +112,7 @@ class ControlCliArguments(args: Seq[String], env: Map[String, String] = sys.env) | batchType ${cliConfig.batchOpts.batchType} | batchUser ${cliConfig.batchOpts.batchUser} | batchState ${cliConfig.batchOpts.batchState} + | batchName ${cliConfig.batchOpts.batchName} | createTime ${cliConfig.batchOpts.createTime} | endTime ${cliConfig.batchOpts.endTime} | from ${cliConfig.batchOpts.from} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/create/CreateServerCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/create/CreateServerCommand.scala index 66f75fc5f67..cbff0c15adb 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/create/CreateServerCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/create/CreateServerCommand.scala @@ -25,7 +25,8 @@ import org.apache.kyuubi.ha.HighAvailabilityConf._ import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryPaths, ServiceNodeInfo} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient -class CreateServerCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]](cliConfig) { +class CreateServerCommand(cliConfig: CliConfig) + extends Command[Iterable[ServiceNodeInfo]](cliConfig) { def validate(): Unit = { if (normalizedCliConfig.resource != ControlObject.SERVER) { @@ -49,14 +50,14 @@ class CreateServerCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeI /** * Expose Kyuubi server instance to another domain. 
*/ - def doRun(): Seq[ServiceNodeInfo] = { + override def doRun(): Iterable[ServiceNodeInfo] = { val kyuubiConf = conf kyuubiConf.setIfMissing(HA_ADDRESSES, normalizedCliConfig.zkOpts.zkQuorum) withDiscoveryClient(kyuubiConf) { discoveryClient => val fromNamespace = DiscoveryPaths.makePath(null, kyuubiConf.get(HA_NAMESPACE)) - val toNamespace = CtlUtils.getZkNamespace(kyuubiConf, normalizedCliConfig) + val toNamespace = CtlUtils.getZkServerNamespace(kyuubiConf, normalizedCliConfig) val currentServerNodes = discoveryClient.getServiceNodesInfo(fromNamespace) val exposedServiceNodes = ListBuffer[ServiceNodeInfo]() @@ -89,7 +90,7 @@ class CreateServerCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeI } } - def render(nodes: Seq[ServiceNodeInfo]): Unit = { + override def render(nodes: Iterable[ServiceNodeInfo]): Unit = { val title = "Created zookeeper service nodes" info(Render.renderServiceNodesInfo(title, nodes)) } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteCommand.scala index 69479259a6f..113fb935cad 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteCommand.scala @@ -16,15 +16,13 @@ */ package org.apache.kyuubi.ctl.cmd.delete -import scala.collection.mutable.ListBuffer - import org.apache.kyuubi.ctl.cmd.Command import org.apache.kyuubi.ctl.opt.CliConfig -import org.apache.kyuubi.ctl.util.{CtlUtils, Render, Validator} -import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient +import org.apache.kyuubi.ctl.util.{Render, Validator} import org.apache.kyuubi.ha.client.ServiceNodeInfo -class DeleteCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]](cliConfig) { +abstract class DeleteCommand(cliConfig: CliConfig) + extends Command[Iterable[ServiceNodeInfo]](cliConfig) { def validate(): Unit = { Validator.validateZkArguments(normalizedCliConfig) @@ -35,30 +33,9 @@ class DeleteCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]]( /** * Delete zookeeper service node with specified host port. 
*/ - def doRun(): Seq[ServiceNodeInfo] = { - withDiscoveryClient(conf) { discoveryClient => - val znodeRoot = CtlUtils.getZkNamespace(conf, normalizedCliConfig) - val hostPortOpt = - Some((normalizedCliConfig.zkOpts.host, normalizedCliConfig.zkOpts.port.toInt)) - val nodesToDelete = CtlUtils.getServiceNodes(discoveryClient, znodeRoot, hostPortOpt) - - val deletedNodes = ListBuffer[ServiceNodeInfo]() - nodesToDelete.foreach { node => - val nodePath = s"$znodeRoot/${node.nodeName}" - info(s"Deleting zookeeper service node:$nodePath") - try { - discoveryClient.delete(nodePath) - deletedNodes += node - } catch { - case e: Exception => - error(s"Failed to delete zookeeper service node:$nodePath", e) - } - } - deletedNodes - } - } + override def doRun(): Iterable[ServiceNodeInfo] - def render(nodes: Seq[ServiceNodeInfo]): Unit = { + override def render(nodes: Iterable[ServiceNodeInfo]): Unit = { val title = "Deleted zookeeper service nodes" info(Render.renderServiceNodesInfo(title, nodes)) } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteEngineCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteEngineCommand.scala index 7be60746785..f3117a7b113 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteEngineCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteEngineCommand.scala @@ -16,7 +16,12 @@ */ package org.apache.kyuubi.ctl.cmd.delete +import scala.collection.mutable.ListBuffer + import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient +import org.apache.kyuubi.ha.client.ServiceNodeInfo class DeleteEngineCommand(cliConfig: CliConfig) extends DeleteCommand(cliConfig) { @@ -28,4 +33,29 @@ class DeleteEngineCommand(cliConfig: CliConfig) extends DeleteCommand(cliConfig) fail("Must specify user name for engine, please use -u or --user.") } } + + override def doRun(): Iterable[ServiceNodeInfo] = { + withDiscoveryClient(conf) { discoveryClient => + val hostPortOpt = + Some((cliConfig.zkOpts.host, cliConfig.zkOpts.port.toInt)) + val candidateNodes = CtlUtils.listZkEngineNodes(conf, normalizedCliConfig, hostPortOpt) + hostPortOpt.map { case (host, port) => + candidateNodes.filter { cn => cn.host == host && cn.port == port } + }.getOrElse(candidateNodes) + val deletedNodes = ListBuffer[ServiceNodeInfo]() + candidateNodes.foreach { node => + val engineNode = discoveryClient.getChildren(node.namespace)(0) + val nodePath = s"${node.namespace}/$engineNode" + info(s"Deleting zookeeper service node:$nodePath") + try { + discoveryClient.delete(nodePath) + deletedNodes += node + } catch { + case e: Exception => + error(s"Failed to delete zookeeper service node:$nodePath", e) + } + } + deletedNodes + } + } } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteServerCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteServerCommand.scala index 6debba4d56f..1f4d67ee63a 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteServerCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/delete/DeleteServerCommand.scala @@ -16,6 +16,34 @@ */ package org.apache.kyuubi.ctl.cmd.delete +import scala.collection.mutable.ListBuffer + import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient +import 
org.apache.kyuubi.ha.client.ServiceNodeInfo + +class DeleteServerCommand(cliConfig: CliConfig) extends DeleteCommand(cliConfig) { + override def doRun(): Iterable[ServiceNodeInfo] = { + withDiscoveryClient(conf) { discoveryClient => + val znodeRoot = CtlUtils.getZkServerNamespace(conf, normalizedCliConfig) + val hostPortOpt = + Some((normalizedCliConfig.zkOpts.host, normalizedCliConfig.zkOpts.port.toInt)) + val nodesToDelete = CtlUtils.getServiceNodes(discoveryClient, znodeRoot, hostPortOpt) -class DeleteServerCommand(cliConfig: CliConfig) extends DeleteCommand(cliConfig) {} + val deletedNodes = ListBuffer[ServiceNodeInfo]() + nodesToDelete.foreach { node => + val nodePath = s"$znodeRoot/${node.nodeName}" + info(s"Deleting zookeeper service node:$nodePath") + try { + discoveryClient.delete(nodePath) + deletedNodes += node + } catch { + case e: Exception => + error(s"Failed to delete zookeeper service node:$nodePath", e) + } + } + deletedNodes + } + } +} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetCommand.scala index d78f0b995bb..5b7ada27d66 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetCommand.scala @@ -18,10 +18,11 @@ package org.apache.kyuubi.ctl.cmd.get import org.apache.kyuubi.ctl.cmd.Command import org.apache.kyuubi.ctl.opt.CliConfig -import org.apache.kyuubi.ctl.util.{CtlUtils, Render, Validator} +import org.apache.kyuubi.ctl.util.{Render, Validator} import org.apache.kyuubi.ha.client.ServiceNodeInfo -class GetCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]](cliConfig) { +abstract class GetCommand(cliConfig: CliConfig) + extends Command[Iterable[ServiceNodeInfo]](cliConfig) { def validate(): Unit = { Validator.validateZkArguments(normalizedCliConfig) @@ -29,11 +30,9 @@ class GetCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]](cli mergeArgsIntoKyuubiConf() } - def doRun(): Seq[ServiceNodeInfo] = { - CtlUtils.listZkServerNodes(conf, normalizedCliConfig, filterHostPort = true) - } + override def doRun(): Iterable[ServiceNodeInfo] - def render(nodes: Seq[ServiceNodeInfo]): Unit = { + override def render(nodes: Iterable[ServiceNodeInfo]): Unit = { val title = "Zookeeper service nodes" info(Render.renderServiceNodesInfo(title, nodes)) } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetEngineCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetEngineCommand.scala index 4d9101625fb..0d30183726e 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetEngineCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetEngineCommand.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.ctl.cmd.get import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.ServiceNodeInfo class GetEngineCommand(cliConfig: CliConfig) extends GetCommand(cliConfig) { @@ -28,4 +30,12 @@ class GetEngineCommand(cliConfig: CliConfig) extends GetCommand(cliConfig) { fail("Must specify user name for engine, please use -u or --user.") } } + + override def doRun(): Iterable[ServiceNodeInfo] = { + CtlUtils.listZkEngineNodes( + conf, + normalizedCliConfig, + Some((cliConfig.zkOpts.host, cliConfig.zkOpts.port.toInt))) + } + } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetServerCommand.scala 
b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetServerCommand.scala index 71b8684532d..744655fd9b6 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetServerCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/get/GetServerCommand.scala @@ -17,5 +17,14 @@ package org.apache.kyuubi.ctl.cmd.get import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.ServiceNodeInfo -class GetServerCommand(cliConfig: CliConfig) extends GetCommand(cliConfig) {} +class GetServerCommand(cliConfig: CliConfig) extends GetCommand(cliConfig) { + override def doRun(): Iterable[ServiceNodeInfo] = { + CtlUtils.listZkServerNodes( + conf, + normalizedCliConfig, + Some((cliConfig.zkOpts.host, cliConfig.zkOpts.port.toInt))) + } +} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListEngineCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListEngineCommand.scala index bc0b16e67f3..96be5cc4744 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListEngineCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListEngineCommand.scala @@ -26,22 +26,24 @@ import org.apache.kyuubi.ctl.cmd.AdminCtlCommand import org.apache.kyuubi.ctl.opt.CliConfig import org.apache.kyuubi.ctl.util.Render -class AdminListEngineCommand(cliConfig: CliConfig) extends AdminCtlCommand[Seq[Engine]](cliConfig) { +class AdminListEngineCommand(cliConfig: CliConfig) + extends AdminCtlCommand[Iterable[Engine]](cliConfig) { override def validate(): Unit = {} - def doRun(): Seq[Engine] = { + override def doRun(): Iterable[Engine] = { withKyuubiRestClient(normalizedCliConfig, null, conf) { kyuubiRestClient => val adminRestApi = new AdminRestApi(kyuubiRestClient) adminRestApi.listEngines( normalizedCliConfig.engineOpts.engineType, normalizedCliConfig.engineOpts.engineShareLevel, normalizedCliConfig.engineOpts.engineSubdomain, - normalizedCliConfig.commonOpts.hs2ProxyUser).asScala + normalizedCliConfig.commonOpts.hs2ProxyUser, + normalizedCliConfig.engineOpts.all).asScala } } - def render(resp: Seq[Engine]): Unit = { + override def render(resp: Iterable[Engine]): Unit = { info(Render.renderEngineNodesInfo(resp)) } } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListServerCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListServerCommand.scala new file mode 100644 index 00000000000..27471f6adb5 --- /dev/null +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/AdminListServerCommand.scala @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.ctl.cmd.list + +import scala.collection.JavaConverters._ + +import org.apache.kyuubi.client.AdminRestApi +import org.apache.kyuubi.client.api.v1.dto.ServerData +import org.apache.kyuubi.ctl.RestClientFactory.withKyuubiRestClient +import org.apache.kyuubi.ctl.cmd.AdminCtlCommand +import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.Render + +class AdminListServerCommand(cliConfig: CliConfig) + extends AdminCtlCommand[Iterable[ServerData]](cliConfig) { + + override def validate(): Unit = {} + + override def doRun(): Iterable[ServerData] = { + withKyuubiRestClient(normalizedCliConfig, null, conf) { kyuubiRestClient => + val adminRestApi = new AdminRestApi(kyuubiRestClient) + adminRestApi.listServers().asScala + } + } + + override def render(resp: Iterable[ServerData]): Unit = { + info(Render.renderServerNodesInfo(resp)) + } +} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListBatchCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListBatchCommand.scala index 4ce1b49b20c..db781da385f 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListBatchCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListBatchCommand.scala @@ -46,6 +46,7 @@ class ListBatchCommand(cliConfig: CliConfig) extends Command[GetBatchesResponse] batchOpts.batchType, batchOpts.batchUser, batchOpts.batchState, + batchOpts.batchName, batchOpts.createTime, batchOpts.endTime, if (batchOpts.from < 0) 0 else batchOpts.from, diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListCommand.scala index 0cfeb8e4ea0..95399a2c742 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListCommand.scala @@ -18,21 +18,20 @@ package org.apache.kyuubi.ctl.cmd.list import org.apache.kyuubi.ctl.cmd.Command import org.apache.kyuubi.ctl.opt.CliConfig -import org.apache.kyuubi.ctl.util.{CtlUtils, Render, Validator} +import org.apache.kyuubi.ctl.util.{Render, Validator} import org.apache.kyuubi.ha.client.ServiceNodeInfo -class ListCommand(cliConfig: CliConfig) extends Command[Seq[ServiceNodeInfo]](cliConfig) { +abstract class ListCommand(cliConfig: CliConfig) + extends Command[Iterable[ServiceNodeInfo]](cliConfig) { def validate(): Unit = { Validator.validateZkArguments(normalizedCliConfig) mergeArgsIntoKyuubiConf() } - def doRun(): Seq[ServiceNodeInfo] = { - CtlUtils.listZkServerNodes(conf, normalizedCliConfig, filterHostPort = false) - } + override def doRun(): Iterable[ServiceNodeInfo] - def render(nodes: Seq[ServiceNodeInfo]): Unit = { + override def render(nodes: Iterable[ServiceNodeInfo]): Unit = { val title = "Zookeeper service nodes" info(Render.renderServiceNodesInfo(title, nodes)) } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListEngineCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListEngineCommand.scala index 6a78a9e97c3..8a26b4cc973 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListEngineCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListEngineCommand.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.ctl.cmd.list import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.ServiceNodeInfo class ListEngineCommand(cliConfig: CliConfig) 
extends ListCommand(cliConfig) { @@ -28,4 +30,7 @@ class ListEngineCommand(cliConfig: CliConfig) extends ListCommand(cliConfig) { fail("Must specify user name for engine, please use -u or --user.") } } + + override def doRun(): Seq[ServiceNodeInfo] = + CtlUtils.listZkEngineNodes(conf, normalizedCliConfig, None) } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListServerCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListServerCommand.scala index 8c3219ecea6..e6c8d6ad36b 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListServerCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListServerCommand.scala @@ -17,5 +17,11 @@ package org.apache.kyuubi.ctl.cmd.list import org.apache.kyuubi.ctl.opt.CliConfig +import org.apache.kyuubi.ctl.util.CtlUtils +import org.apache.kyuubi.ha.client.ServiceNodeInfo -class ListServerCommand(cliConfig: CliConfig) extends ListCommand(cliConfig) {} +class ListServerCommand(cliConfig: CliConfig) extends ListCommand(cliConfig) { + override def doRun(): Iterable[ServiceNodeInfo] = { + CtlUtils.listZkServerNodes(conf, normalizedCliConfig, None) + } +} diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListSessionCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListSessionCommand.scala index 7a3668876bb..9d1dfead4fb 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListSessionCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/list/ListSessionCommand.scala @@ -26,18 +26,18 @@ import org.apache.kyuubi.ctl.cmd.Command import org.apache.kyuubi.ctl.opt.CliConfig import org.apache.kyuubi.ctl.util.Render -class ListSessionCommand(cliConfig: CliConfig) extends Command[Seq[SessionData]](cliConfig) { +class ListSessionCommand(cliConfig: CliConfig) extends Command[Iterable[SessionData]](cliConfig) { override def validate(): Unit = {} - def doRun(): Seq[SessionData] = { + override def doRun(): Iterable[SessionData] = { withKyuubiRestClient(normalizedCliConfig, null, conf) { kyuubiRestClient => val sessionRestApi = new SessionRestApi(kyuubiRestClient) sessionRestApi.listSessions.asScala } } - def render(resp: Seq[SessionData]): Unit = { + override def render(resp: Iterable[SessionData]): Unit = { info(Render.renderSessionDataListInfo(resp)) } } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/refresh/RefreshConfigCommand.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/refresh/RefreshConfigCommand.scala index 69aa0c3d0f1..1cda224df00 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/refresh/RefreshConfigCommand.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/cmd/refresh/RefreshConfigCommand.scala @@ -21,7 +21,7 @@ import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.client.AdminRestApi import org.apache.kyuubi.ctl.RestClientFactory.withKyuubiRestClient import org.apache.kyuubi.ctl.cmd.AdminCtlCommand -import org.apache.kyuubi.ctl.cmd.refresh.RefreshConfigCommandConfigType.{HADOOP_CONF, UNLIMITED_USERS, USER_DEFAULTS_CONF} +import org.apache.kyuubi.ctl.cmd.refresh.RefreshConfigCommandConfigType.{DENY_USERS, HADOOP_CONF, KUBERNETES_CONF, UNLIMITED_USERS, USER_DEFAULTS_CONF} import org.apache.kyuubi.ctl.opt.CliConfig import org.apache.kyuubi.ctl.util.{Tabulator, Validator} @@ -36,7 +36,9 @@ class RefreshConfigCommand(cliConfig: CliConfig) extends AdminCtlCommand[String] normalizedCliConfig.adminConfigOpts.configType match { case HADOOP_CONF => 
adminRestApi.refreshHadoopConf() case USER_DEFAULTS_CONF => adminRestApi.refreshUserDefaultsConf() + case KUBERNETES_CONF => adminRestApi.refreshKubernetesConf() case UNLIMITED_USERS => adminRestApi.refreshUnlimitedUsers() + case DENY_USERS => adminRestApi.refreshDenyUsers() case configType => throw new KyuubiException(s"Invalid config type:$configType") } } @@ -49,5 +51,7 @@ class RefreshConfigCommand(cliConfig: CliConfig) extends AdminCtlCommand[String] object RefreshConfigCommandConfigType { final val HADOOP_CONF = "hadoopConf" final val USER_DEFAULTS_CONF = "userDefaultsConf" + final val KUBERNETES_CONF = "kubernetesConf" final val UNLIMITED_USERS = "unlimitedUsers" + final val DENY_USERS = "denyUsers" } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/AdminCommandLine.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/AdminCommandLine.scala index b1a70935b0d..c02826b6875 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/AdminCommandLine.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/AdminCommandLine.scala @@ -52,7 +52,7 @@ object AdminCommandLine extends CommonCommandLine { .text("\tDelete resources.") .action((_, c) => c.copy(action = ControlAction.DELETE)) .children( - engineCmd(builder).text("\tDelete the specified engine node for user."))) + deleteEngineCmd(builder).text("\tDelete the specified engine node for user."))) } @@ -64,7 +64,8 @@ object AdminCommandLine extends CommonCommandLine { .text("\tList information about resources.") .action((_, c) => c.copy(action = ControlAction.LIST)) .children( - engineCmd(builder).text("\tList all the engine nodes for a user"))) + listEngineCmd(builder).text("\tList the engine nodes"), + serverCmd(builder).text("\tList all the server nodes"))) } @@ -79,7 +80,7 @@ object AdminCommandLine extends CommonCommandLine { refreshConfigCmd(builder).text("\tRefresh the config with specified type."))) } - private def engineCmd(builder: OParserBuilder[CliConfig]): OParser[_, CliConfig] = { + private def deleteEngineCmd(builder: OParserBuilder[CliConfig]): OParser[_, CliConfig] = { import builder._ cmd("engine").action((_, c) => c.copy(resource = ControlObject.ENGINE)) .children( @@ -94,6 +95,29 @@ object AdminCommandLine extends CommonCommandLine { .text("The engine share level this engine belong to.")) } + private def listEngineCmd(builder: OParserBuilder[CliConfig]): OParser[_, CliConfig] = { + import builder._ + cmd("engine").action((_, c) => c.copy(resource = ControlObject.ENGINE)) + .children( + opt[String]("engine-type").abbr("et") + .action((v, c) => c.copy(engineOpts = c.engineOpts.copy(engineType = v))) + .text("The engine type this engine belong to."), + opt[String]("engine-subdomain").abbr("es") + .action((v, c) => c.copy(engineOpts = c.engineOpts.copy(engineSubdomain = v))) + .text("The engine subdomain this engine belong to."), + opt[String]("engine-share-level").abbr("esl") + .action((v, c) => c.copy(engineOpts = c.engineOpts.copy(engineShareLevel = v))) + .text("The engine share level this engine belong to."), + opt[String]("all").abbr("a") + .action((v, c) => c.copy(engineOpts = c.engineOpts.copy(all = v))) + .text("All the engine.")) + } + + private def serverCmd(builder: OParserBuilder[CliConfig]): OParser[_, CliConfig] = { + import builder._ + cmd("server").action((_, c) => c.copy(resource = ControlObject.SERVER)) + } + private def refreshConfigCmd(builder: OParserBuilder[CliConfig]): OParser[_, CliConfig] = { import builder._ cmd("config").action((_, c) => c.copy(resource = 
ControlObject.CONFIG)) @@ -102,6 +126,7 @@ object AdminCommandLine extends CommonCommandLine { .optional() .action((v, c) => c.copy(adminConfigOpts = c.adminConfigOpts.copy(configType = v))) .text("The valid config type can be one of the following: " + - s"$HADOOP_CONF, $USER_DEFAULTS_CONF, $UNLIMITED_USERS.")) + s"$HADOOP_CONF, $USER_DEFAULTS_CONF, $KUBERNETES_CONF, " + + s"$UNLIMITED_USERS, $DENY_USERS.")) } } diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CliConfig.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CliConfig.scala index 38284c595b5..4ccae109c6a 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CliConfig.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CliConfig.scala @@ -66,6 +66,7 @@ case class BatchOpts( batchType: String = null, batchUser: String = null, batchState: String = null, + batchName: String = null, createTime: Long = 0, endTime: Long = 0, from: Int = -1, @@ -76,6 +77,7 @@ case class EngineOpts( user: String = null, engineType: String = null, engineSubdomain: String = null, - engineShareLevel: String = null) + engineShareLevel: String = null, + all: String = null) case class AdminConfigOpts(configType: String = null) diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CommandLine.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CommandLine.scala index 478c439a45b..271bb06ab57 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CommandLine.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/opt/CommandLine.scala @@ -222,6 +222,9 @@ object CommandLine extends CommonCommandLine { opt[String]("batchState") .action((v, c) => c.copy(batchOpts = c.batchOpts.copy(batchState = v))) .text("Batch state."), + opt[String]("batchName") + .action((v, c) => c.copy(batchOpts = c.batchOpts.copy(batchName = v))) + .text("Batch name."), opt[String]("createTime") .action((v, c) => c.copy(batchOpts = c.batchOpts.copy(createTime = diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/CtlUtils.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/CtlUtils.scala index fdcc127f16a..8ce1d611a5a 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/CtlUtils.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/CtlUtils.scala @@ -25,48 +25,35 @@ import org.yaml.snakeyaml.Yaml import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SHARE_LEVEL, ENGINE_SHARE_LEVEL_SUBDOMAIN, ENGINE_TYPE} -import org.apache.kyuubi.ctl.opt.{CliConfig, ControlObject} +import org.apache.kyuubi.ctl.opt.CliConfig import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryPaths, ServiceNodeInfo} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient object CtlUtils { - private[ctl] def getZkNamespace(conf: KyuubiConf, cliConfig: CliConfig): String = { - cliConfig.resource match { - case ControlObject.SERVER => - DiscoveryPaths.makePath(null, cliConfig.zkOpts.namespace) - case ControlObject.ENGINE => - val engineType = Some(cliConfig.engineOpts.engineType) - .filter(_ != null).filter(_.nonEmpty) - .getOrElse(conf.get(ENGINE_TYPE)) - val engineSubdomain = Some(cliConfig.engineOpts.engineSubdomain) - .filter(_ != null).filter(_.nonEmpty) - .getOrElse(conf.get(ENGINE_SHARE_LEVEL_SUBDOMAIN).getOrElse("default")) - val engineShareLevel = Some(cliConfig.engineOpts.engineShareLevel) - .filter(_ != null).filter(_.nonEmpty) - .getOrElse(conf.get(ENGINE_SHARE_LEVEL)) - // The 
path of the engine defined in zookeeper comes from - // org.apache.kyuubi.engine.EngineRef#engineSpace - DiscoveryPaths.makePath( - s"${cliConfig.zkOpts.namespace}_" + - s"${cliConfig.zkOpts.version}_" + - s"${engineShareLevel}_${engineType}", - cliConfig.engineOpts.user, - engineSubdomain) - } + private[ctl] def getZkServerNamespace(conf: KyuubiConf, cliConfig: CliConfig): String = { + DiscoveryPaths.makePath(null, cliConfig.zkOpts.namespace) } - private[ctl] def getServiceNodes( - discoveryClient: DiscoveryClient, - znodeRoot: String, - hostPortOpt: Option[(String, Int)]): Seq[ServiceNodeInfo] = { - val serviceNodes = discoveryClient.getServiceNodesInfo(znodeRoot) - hostPortOpt match { - case Some((host, port)) => serviceNodes.filter { sn => - sn.host == host && sn.port == port - } - case _ => serviceNodes - } + private[ctl] def getZkEngineNamespaceAndSubdomain( + conf: KyuubiConf, + cliConfig: CliConfig): (String, Option[String]) = { + val engineType = Some(cliConfig.engineOpts.engineType) + .filter(_ != null).filter(_.nonEmpty) + .getOrElse(conf.get(ENGINE_TYPE)) + val engineShareLevel = Some(cliConfig.engineOpts.engineShareLevel) + .filter(_ != null).filter(_.nonEmpty) + .getOrElse(conf.get(ENGINE_SHARE_LEVEL)) + val engineSubdomain = Option(cliConfig.engineOpts.engineSubdomain) + .filter(_.nonEmpty).orElse(conf.get(ENGINE_SHARE_LEVEL_SUBDOMAIN)) + // The path of the engine defined in zookeeper comes from + // org.apache.kyuubi.engine.EngineRef#engineSpace + val rootPath = DiscoveryPaths.makePath( + s"${cliConfig.zkOpts.namespace}_" + + s"${cliConfig.zkOpts.version}_" + + s"${engineShareLevel}_${engineType}", + cliConfig.engineOpts.user) + (rootPath, engineSubdomain) } /** @@ -75,17 +62,41 @@ object CtlUtils { private[ctl] def listZkServerNodes( conf: KyuubiConf, cliConfig: CliConfig, - filterHostPort: Boolean): Seq[ServiceNodeInfo] = { - var nodes = Seq.empty[ServiceNodeInfo] + hostPortOpt: Option[(String, Int)]): Seq[ServiceNodeInfo] = { withDiscoveryClient(conf) { discoveryClient => - val znodeRoot = getZkNamespace(conf, cliConfig) - val hostPortOpt = - if (filterHostPort) { - Some((cliConfig.zkOpts.host, cliConfig.zkOpts.port.toInt)) - } else None - nodes = getServiceNodes(discoveryClient, znodeRoot, hostPortOpt) + val znodeRoot = getZkServerNamespace(conf, cliConfig) + getServiceNodes(discoveryClient, znodeRoot, hostPortOpt) } - nodes + } + + /** + * List Kyuubi engine nodes info. 
+ */ + private[ctl] def listZkEngineNodes( + conf: KyuubiConf, + cliConfig: CliConfig, + hostPortOpt: Option[(String, Int)]): Seq[ServiceNodeInfo] = { + withDiscoveryClient(conf) { discoveryClient => + val (znodeRoot, subdomainOpt) = getZkEngineNamespaceAndSubdomain(conf, cliConfig) + val candidates = discoveryClient.getChildren(znodeRoot) + val matched = subdomainOpt match { + case Some(subdomain) => candidates.filter(_ == subdomain) + case None => candidates + } + matched.flatMap { subdomain => + getServiceNodes(discoveryClient, s"$znodeRoot/$subdomain", hostPortOpt) + } + } + } + + private[ctl] def getServiceNodes( + discoveryClient: DiscoveryClient, + znodeRoot: String, + hostPortOpt: Option[(String, Int)]): Seq[ServiceNodeInfo] = { + val serviceNodes = discoveryClient.getServiceNodesInfo(znodeRoot) + hostPortOpt.map { case (host, port) => + serviceNodes.filter { sn => sn.host == host && sn.port == port } + }.getOrElse(serviceNodes) } private[ctl] def loadYamlAsMap(cliConfig: CliConfig): JMap[String, Object] = { diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Render.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Render.scala index aba6df35a4b..92db46d888c 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Render.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Render.scala @@ -19,21 +19,21 @@ package org.apache.kyuubi.ctl.util import scala.collection.JavaConverters._ import scala.collection.mutable.ListBuffer -import org.apache.kyuubi.client.api.v1.dto.{Batch, Engine, GetBatchesResponse, SessionData} +import org.apache.kyuubi.client.api.v1.dto.{Batch, Engine, GetBatchesResponse, ServerData, SessionData} import org.apache.kyuubi.ctl.util.DateTimeUtils._ import org.apache.kyuubi.ha.client.ServiceNodeInfo private[ctl] object Render { - def renderServiceNodesInfo(title: String, serviceNodeInfo: Seq[ServiceNodeInfo]): String = { + def renderServiceNodesInfo(title: String, serviceNodeInfo: Iterable[ServiceNodeInfo]): String = { val header = Array("Namespace", "Host", "Port", "Version") - val rows = serviceNodeInfo.sortBy(_.nodeName).map { sn => + val rows = serviceNodeInfo.toSeq.sortBy(_.nodeName).map { sn => Array(sn.namespace, sn.host, sn.port.toString, sn.version.getOrElse("")) }.toArray Tabulator.format(title, header, rows) } - def renderEngineNodesInfo(engineNodesInfo: Seq[Engine]): String = { + def renderEngineNodesInfo(engineNodesInfo: Iterable[Engine]): String = { val title = s"Engine Node List (total ${engineNodesInfo.size})" val header = Array("Namespace", "Instance", "Attributes") val rows = engineNodesInfo.map { engine => @@ -45,7 +45,20 @@ private[ctl] object Render { Tabulator.format(title, header, rows) } - def renderSessionDataListInfo(sessions: Seq[SessionData]): String = { + def renderServerNodesInfo(serverNodesInfo: Iterable[ServerData]): String = { + val title = s"Server Node List (total ${serverNodesInfo.size})" + val header = Array("Namespace", "Instance", "Attributes", "Status") + val rows = serverNodesInfo.map { server => + Array( + server.getNamespace, + server.getInstance, + server.getAttributes.asScala.map { case (k, v) => s"$k=$v" }.mkString("\n"), + server.getStatus) + }.toArray + Tabulator.format(title, header, rows) + } + + def renderSessionDataListInfo(sessions: Iterable[SessionData]): String = { val title = s"Live Session List (total ${sessions.size})" val header = Array( "Identifier", @@ -111,6 +124,9 @@ private[ctl] object Render { private def buildBatchAppInfo(batch: Batch, showDiagnostic: 
Boolean = true): List[String] = { val batchAppInfo = ListBuffer[String]() + batch.getBatchInfo.asScala.foreach { case (key, value) => + batchAppInfo += s"$key: $value" + } if (batch.getAppStartTime > 0) { batchAppInfo += s"App Start Time:" + s" ${millisToDateString(batch.getAppStartTime, "yyyy-MM-dd HH:mm:ss")}" diff --git a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Tabulator.scala b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Tabulator.scala index 70443628910..70fed87f653 100644 --- a/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Tabulator.scala +++ b/kyuubi-ctl/src/main/scala/org/apache/kyuubi/ctl/util/Tabulator.scala @@ -23,11 +23,11 @@ import org.apache.commons.lang3.StringUtils private[kyuubi] object Tabulator { def format(title: String, header: Array[String], rows: Array[Array[String]]): String = { val textTable = formatTextTable(header, rows) - val footer = s"${rows.size} row(s)\n" + val footer = s"${rows.length} row(s)\n" if (StringUtils.isBlank(title)) { textTable + footer } else { - val rowWidth = textTable.split("\n").head.size + val rowWidth = textTable.split("\n").head.length val titleNewLine = "\n" + StringUtils.center(title, rowWidth) + "\n" titleNewLine + textTable + footer } diff --git a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/AdminControlCliArgumentsSuite.scala b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/AdminControlCliArgumentsSuite.scala index dab796127e3..ae7c0fa1b96 100644 --- a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/AdminControlCliArgumentsSuite.scala +++ b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/AdminControlCliArgumentsSuite.scala @@ -83,6 +83,24 @@ class AdminControlCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExi assert(opArgs3.cliConfig.resource === ControlObject.CONFIG) assert(opArgs3.cliConfig.adminConfigOpts.configType === UNLIMITED_USERS) + args = Array( + "refresh", + "config", + "kubernetesConf") + val opArgs4 = new AdminControlCliArguments(args) + assert(opArgs4.cliConfig.action === ControlAction.REFRESH) + assert(opArgs4.cliConfig.resource === ControlObject.CONFIG) + assert(opArgs4.cliConfig.adminConfigOpts.configType === KUBERNETES_CONF) + + args = Array( + "refresh", + "config", + "denyUsers") + val opArgs5 = new AdminControlCliArguments(args) + assert(opArgs5.cliConfig.action === ControlAction.REFRESH) + assert(opArgs5.cliConfig.resource === ControlObject.CONFIG) + assert(opArgs5.cliConfig.adminConfigOpts.configType === DENY_USERS) + args = Array( "refresh", "config", @@ -115,6 +133,13 @@ class AdminControlCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExi } } + test("test list server") { + val args = Array("list", "server") + val opArgs = new AdminControlCliArguments(args) + assert(opArgs.cliConfig.action.toString === "LIST") + assert(opArgs.cliConfig.resource.toString === "SERVER") + } + test("test --help") { // scalastyle:off val helpString = @@ -130,16 +155,19 @@ class AdminControlCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExi | --hs2ProxyUser The value of hive.server2.proxy.user config. | --conf Kyuubi config property pair, formatted key=value. | - |Command: list [engine] + |Command: list [engine|server] | List information about resources. |Command: list engine [options] - | List all the engine nodes for a user + | List the engine nodes | -et, --engine-type | The engine type this engine belong to. | -es, --engine-subdomain | The engine subdomain this engine belong to. | -esl, --engine-share-level | The engine share level this engine belong to. 
+ | -a, --all All the engine. + |Command: list server + | List all the server nodes | |Command: delete [engine] | Delete resources. @@ -156,7 +184,7 @@ class AdminControlCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExi | Refresh the resource. |Command: refresh config [] | Refresh the config with specified type. - | The valid config type can be one of the following: $HADOOP_CONF, $USER_DEFAULTS_CONF, $UNLIMITED_USERS. + | The valid config type can be one of the following: $HADOOP_CONF, $USER_DEFAULTS_CONF, $KUBERNETES_CONF, $UNLIMITED_USERS, $DENY_USERS. | | -h, --help Show help message and exit.""".stripMargin // scalastyle:on diff --git a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/BatchCliArgumentsSuite.scala b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/BatchCliArgumentsSuite.scala index 7563d985a74..bf8f101e00a 100644 --- a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/BatchCliArgumentsSuite.scala +++ b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/BatchCliArgumentsSuite.scala @@ -84,7 +84,7 @@ class BatchCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExit { "-f", batchYamlFile) val opArgs = new ControlCliArguments(args) - assert(opArgs.cliConfig.batchOpts.waitCompletion == true) + assert(opArgs.cliConfig.batchOpts.waitCompletion) } test("submit batch without waitForCompletion") { @@ -96,7 +96,7 @@ class BatchCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExit { "--waitCompletion", "false") val opArgs = new ControlCliArguments(args) - assert(opArgs.cliConfig.batchOpts.waitCompletion == false) + assert(!opArgs.cliConfig.batchOpts.waitCompletion) } test("get/delete batch") { diff --git a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliArgumentsSuite.scala b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliArgumentsSuite.scala index 1b973c0eb3c..bd5b2ac4518 100644 --- a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliArgumentsSuite.scala +++ b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliArgumentsSuite.scala @@ -429,6 +429,7 @@ class ControlCliArgumentsSuite extends KyuubiFunSuite with TestPrematureExit { | --batchType Batch type. | --batchUser Batch user. | --batchState Batch state. + | --batchName Batch name. | --createTime Batch create time, should be in yyyyMMddHHmmss format. | --endTime Batch end time, should be in yyyyMMddHHmmss format. | --from Specify which record to start from retrieving info. 
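Aside (illustrative, not part of the patch): the Render/Tabulator changes earlier in this diff switch the render helpers to accept any Iterable and delegate layout to Tabulator.format. A minimal sketch of that flow, using a hypothetical RenderSketch object and made-up host/port rows:

package org.apache.kyuubi.ctl.util

// Hypothetical helper, shown only to illustrate the Iterable-based Render/Tabulator contract.
private[ctl] object RenderSketch {
  def renderHostPorts(title: String, nodes: Iterable[(String, Int)]): String = {
    val header = Array("Host", "Port")
    // Tabulator.format takes Array[Array[String]] rows and appends an "N row(s)" footer.
    val rows = nodes.map { case (host, port) => Array(host, port.toString) }.toArray
    Tabulator.format(s"$title (total ${nodes.size})", header, rows)
  }
}

For example, RenderSketch.renderHostPorts("Server Node List", Seq(("host1", 10009))) returns the centered title, the two-column table, and the "1 row(s)" footer that Tabulator produces.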
diff --git a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliSuite.scala b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliSuite.scala index d27f3ec2a19..43a694a081a 100644 --- a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliSuite.scala +++ b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/ControlCliSuite.scala @@ -199,20 +199,23 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { } } - test("test get zk namespace for different service type") { - val arg1 = Array( + test("test get zk server namespace") { + val args = Array( "list", "server", "--zk-quorum", zkServer.getConnectString, "--namespace", namespace) - val scArgs1 = new ControlCliArguments(arg1) - assert(CtlUtils.getZkNamespace( - scArgs1.command.conf, - scArgs1.command.normalizedCliConfig) == s"/$namespace") + val scArgs = new ControlCliArguments(args) + assert( + CtlUtils.getZkServerNamespace( + scArgs.command.conf, + scArgs.command.normalizedCliConfig) === s"/$namespace") + } - val arg2 = Array( + test("test get zk engine namespace") { + val args = Array( "list", "engine", "--zk-quorum", @@ -221,9 +224,11 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { namespace, "--user", user) - val scArgs2 = new ControlCliArguments(arg2) - assert(CtlUtils.getZkNamespace(scArgs2.command.conf, scArgs2.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user/default") + val scArgs = new ControlCliArguments(args) + val expected = (s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs.command.conf, + scArgs.command.normalizedCliConfig) === expected) } test("test list zk service nodes info") { @@ -364,8 +369,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--user", user) val scArgs1 = new ControlCliArguments(arg1) - assert(CtlUtils.getZkNamespace(scArgs1.command.conf, scArgs1.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user/default") + val expected1 = (s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs1.command.conf, + scArgs1.command.normalizedCliConfig) === expected1) val arg2 = Array( "list", @@ -379,8 +386,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-type", "FLINK_SQL") val scArgs2 = new ControlCliArguments(arg2) - assert(CtlUtils.getZkNamespace(scArgs2.command.conf, scArgs2.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_FLINK_SQL/$user/default") + val expected2 = (s"/${namespace}_${KYUUBI_VERSION}_USER_FLINK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs2.command.conf, + scArgs2.command.normalizedCliConfig) === expected2) val arg3 = Array( "list", @@ -394,8 +403,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-type", "TRINO") val scArgs3 = new ControlCliArguments(arg3) - assert(CtlUtils.getZkNamespace(scArgs3.command.conf, scArgs3.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_TRINO/$user/default") + val expected3 = (s"/${namespace}_${KYUUBI_VERSION}_USER_TRINO/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs3.command.conf, + scArgs3.command.normalizedCliConfig) === expected3) val arg4 = Array( "list", @@ -411,8 +422,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-subdomain", "sub_1") val scArgs4 = new 
ControlCliArguments(arg4) - assert(CtlUtils.getZkNamespace(scArgs4.command.conf, scArgs4.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user/sub_1") + val expected4 = (s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user", Some("sub_1")) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs4.command.conf, + scArgs4.command.normalizedCliConfig) === expected4) val arg5 = Array( "list", @@ -430,8 +443,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-subdomain", "sub_1") val scArgs5 = new ControlCliArguments(arg5) - assert(CtlUtils.getZkNamespace(scArgs5.command.conf, scArgs5.command.normalizedCliConfig) == - s"/${namespace}_1.5.0_USER_SPARK_SQL/$user/sub_1") + val expected5 = (s"/${namespace}_1.5.0_USER_SPARK_SQL/$user", Some("sub_1")) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs5.command.conf, + scArgs5.command.normalizedCliConfig) === expected5) } test("test get zk namespace for different share level engines") { @@ -445,8 +460,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--user", user) val scArgs1 = new ControlCliArguments(arg1) - assert(CtlUtils.getZkNamespace(scArgs1.command.conf, scArgs1.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user/default") + val expected1 = (s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs1.command.conf, + scArgs1.command.normalizedCliConfig) === expected1) val arg2 = Array( "list", @@ -460,8 +477,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-share-level", "CONNECTION") val scArgs2 = new ControlCliArguments(arg2) - assert(CtlUtils.getZkNamespace(scArgs2.command.conf, scArgs2.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL/$user/default") + val expected2 = (s"/${namespace}_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs2.command.conf, + scArgs2.command.normalizedCliConfig) === expected2) val arg3 = Array( "list", @@ -475,8 +494,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-share-level", "USER") val scArgs3 = new ControlCliArguments(arg3) - assert(CtlUtils.getZkNamespace(scArgs3.command.conf, scArgs3.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user/default") + val expected3 = (s"/${namespace}_${KYUUBI_VERSION}_USER_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs3.command.conf, + scArgs3.command.normalizedCliConfig) === expected3) val arg4 = Array( "list", @@ -490,8 +511,10 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-share-level", "GROUP") val scArgs4 = new ControlCliArguments(arg4) - assert(CtlUtils.getZkNamespace(scArgs4.command.conf, scArgs4.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_GROUP_SPARK_SQL/$user/default") + val expected4 = (s"/${namespace}_${KYUUBI_VERSION}_GROUP_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs4.command.conf, + scArgs4.command.normalizedCliConfig) === expected4) val arg5 = Array( "list", @@ -505,7 +528,9 @@ class ControlCliSuite extends KyuubiFunSuite with TestPrematureExit { "--engine-share-level", "SERVER") val scArgs5 = new ControlCliArguments(arg5) - assert(CtlUtils.getZkNamespace(scArgs5.command.conf, 
scArgs5.command.normalizedCliConfig) == - s"/${namespace}_${KYUUBI_VERSION}_SERVER_SPARK_SQL/$user/default") + val expected5 = (s"/${namespace}_${KYUUBI_VERSION}_SERVER_SPARK_SQL/$user", None) + assert(CtlUtils.getZkEngineNamespaceAndSubdomain( + scArgs5.command.conf, + scArgs5.command.normalizedCliConfig) === expected5) } } diff --git a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/TestPrematureExit.scala b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/TestPrematureExit.scala index 0e4cc130227..5f8107da701 100644 --- a/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/TestPrematureExit.scala +++ b/kyuubi-ctl/src/test/scala/org/apache/kyuubi/ctl/TestPrematureExit.scala @@ -34,7 +34,7 @@ trait TestPrematureExit { /** Simple PrintStream that reads data into a buffer */ private class BufferPrintStream extends PrintStream(noOpOutputStream) { - var lineBuffer = ArrayBuffer[String]() + val lineBuffer = ArrayBuffer[String]() // scalastyle:off println override def println(line: Any): Unit = { lineBuffer += line.toString @@ -52,11 +52,11 @@ trait TestPrematureExit { @volatile var exitedCleanly = false val original = mainObject.exitFn - mainObject.exitFn = (_) => exitedCleanly = true + mainObject.exitFn = _ => exitedCleanly = true try { @volatile var exception: Exception = null val thread = new Thread { - override def run() = + override def run(): Unit = try { mainObject.main(input) } catch { diff --git a/kyuubi-events/pom.xml b/kyuubi-events/pom.xml index b97e9dffbb5..9b30b575017 100644 --- a/kyuubi-events/pom.xml +++ b/kyuubi-events/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-events_2.12 + kyuubi-events_${scala.binary.version} jar Kyuubi Project Events https://kyuubi.apache.org/ @@ -37,6 +37,17 @@ ${project.version} + + org.apache.kyuubi + kyuubi-util-scala_${scala.binary.version} + ${project.version} + + + + org.apache.kafka + kafka-clients + + org.apache.kyuubi kyuubi-common_${scala.binary.version} diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala index e854e40a769..063f1719ec2 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala @@ -40,6 +40,8 @@ sealed trait EventBus { def register[T <: KyuubiEvent: ClassTag](eventHandler: EventHandler[T]): EventBus def registerAsync[T <: KyuubiEvent: ClassTag](eventHandler: EventHandler[T]): EventBus + + def deregisterAll(): Unit = {} } object EventBus extends Logging { @@ -68,6 +70,10 @@ object EventBus extends Logging { def registerAsync[T <: KyuubiEvent: ClassTag](et: EventHandler[T]): EventBus = defaultEventBus.registerAsync[T](et) + def deregisterAll(): Unit = synchronized { + defaultEventBus.deregisterAll() + } + private case class EventBusLive() extends EventBus { private[this] lazy val eventHandlerRegistry = new Registry private[this] lazy val asyncEventHandlerRegistry = new Registry @@ -96,6 +102,11 @@ object EventBus extends Logging { asyncEventHandlerRegistry.register(et) this } + + override def deregisterAll(): Unit = { + eventHandlerRegistry.deregisterAll() + asyncEventHandlerRegistry.deregisterAll() + } } private class Registry { @@ -122,5 +133,10 @@ object EventBus extends Logging { } yield parent clazz :: parents } + + def deregisterAll(): Unit = { + eventHandlers.values.flatten.foreach(_.close()) + eventHandlers.clear() + } } } diff --git 
a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala index 6c7e0893ff0..f75e4be4f51 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala @@ -51,6 +51,10 @@ trait EventHandlerRegister extends Logging { throw new KyuubiException(s"Unsupported jdbc event logger.") } + protected def createKafkaEventHandler(kyuubiConf: KyuubiConf): EventHandler[KyuubiEvent] = { + throw new KyuubiException(s"Unsupported kafka event logger.") + } + private def loadEventHandler( eventLoggerType: EventLoggerType, kyuubiConf: KyuubiConf): Seq[EventHandler[KyuubiEvent]] = { @@ -64,6 +68,9 @@ trait EventHandlerRegister extends Logging { case EventLoggerType.JDBC => createJdbcEventHandler(kyuubiConf) :: Nil + case EventLoggerType.KAFKA => + createKafkaEventHandler(kyuubiConf) :: Nil + case EventLoggerType.CUSTOM => EventHandlerLoader.loadCustom(kyuubiConf) diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala index a029a0fc5db..987982371e7 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala @@ -21,6 +21,5 @@ object EventLoggerType extends Enumeration { type EventLoggerType = Value - // TODO: Only SPARK is done now - val SPARK, JSON, JDBC, CUSTOM = Value + val SPARK, JSON, JDBC, CUSTOM, KAFKA = Value } diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/EventHandlerLoader.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/EventHandlerLoader.scala index c81dcfb9b20..ea4110455b9 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/EventHandlerLoader.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/EventHandlerLoader.scala @@ -16,40 +16,30 @@ */ package org.apache.kyuubi.events.handler -import java.util.ServiceLoader - -import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer import scala.util.{Failure, Success, Try} import org.apache.kyuubi.{Logging, Utils} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.events.KyuubiEvent +import org.apache.kyuubi.util.reflect.ReflectUtils._ object EventHandlerLoader extends Logging { def loadCustom(kyuubiConf: KyuubiConf): Seq[EventHandler[KyuubiEvent]] = { - val providers = ArrayBuffer[CustomEventHandlerProvider]() - ServiceLoader.load( - classOf[CustomEventHandlerProvider], - Utils.getContextOrKyuubiClassLoader) - .iterator() - .asScala - .foreach(provider => providers += provider) - - providers.map { provider => - Try { - provider.create(kyuubiConf) - } match { - case Success(value) => - value - case Failure(exception) => - warn( - s"Failed to create an EventHandler by the ${provider.getClass.getName}," + - s" it will be ignored.", - exception) - null - } - }.filter(_ != null) + loadFromServiceLoader[CustomEventHandlerProvider](Utils.getContextOrKyuubiClassLoader) + .map { provider => + Try { + provider.create(kyuubiConf) + } match { + case Success(value) => + value + case Failure(exception) => + warn( + s"Failed to create an EventHandler by the ${provider.getClass.getName}," + + s" it will be ignored.", + exception) + null + } + }.filter(_ != null).toSeq } } diff --git 
a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala index f6f74de9a28..77d80b1521c 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala @@ -65,6 +65,17 @@ class JsonLoggingEventHandler( stream.foreach(_.hflush()) } + override def close(): Unit = { + writers.values.foreach { case (writer, stream) => + writer.flush() + stream.foreach(_.hflush()) + writer.close() + stream.foreach(_.close()) + } + writers.clear() + fs = null + } + private def getOrUpdate(event: KyuubiEvent): Logger = synchronized { val partitions = event.partitions.map(kv => s"${kv._1}=${kv._2}").mkString(Path.SEPARATOR) writers.getOrElseUpdate( @@ -108,6 +119,7 @@ class JsonLoggingEventHandler( } object JsonLoggingEventHandler { - val JSON_LOG_DIR_PERM: FsPermission = new FsPermission(Integer.parseInt("770", 8).toShort) - val JSON_LOG_FILE_PERM: FsPermission = new FsPermission(Integer.parseInt("660", 8).toShort) + private val JSON_LOG_DIR_PERM: FsPermission = new FsPermission(Integer.parseInt("770", 8).toShort) + private val JSON_LOG_FILE_PERM: FsPermission = + new FsPermission(Integer.parseInt("660", 8).toShort) } diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala new file mode 100644 index 00000000000..2625f167bb1 --- /dev/null +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.events.handler + +import java.time.Duration +import java.util.Properties + +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} + +import org.apache.kyuubi.Logging +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.events.KyuubiEvent +import org.apache.kyuubi.events.handler.KafkaLoggingEventHandler._ + +/** + * This event logger logs events to Kafka. 
+ */ +class KafkaLoggingEventHandler( + topic: String, + producerConf: Iterable[(String, String)], + kyuubiConf: KyuubiConf, + closeTimeoutInMs: Long) extends EventHandler[KyuubiEvent] with Logging { + private def defaultProducerConf: Properties = { + val conf = new Properties() + conf.setProperty("key.serializer", DEFAULT_SERIALIZER_CLASS) + conf.setProperty("value.serializer", DEFAULT_SERIALIZER_CLASS) + conf + } + + private val normalizedProducerConf: Properties = { + val conf = defaultProducerConf + producerConf.foreach(p => conf.setProperty(p._1, p._2)) + conf + } + + private val kafkaProducer = new KafkaProducer[String, String](normalizedProducerConf) + + override def apply(event: KyuubiEvent): Unit = { + try { + val record = new ProducerRecord[String, String](topic, event.eventType, event.toJson) + kafkaProducer.send(record) + } catch { + case e: Exception => + error("Failed to send event in KafkaEventHandler", e) + } + } + + override def close(): Unit = { + kafkaProducer.close(Duration.ofMillis(closeTimeoutInMs)) + } +} + +object KafkaLoggingEventHandler { + private val DEFAULT_SERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringSerializer" +} diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala index 41cf001ed31..69e1fdcee12 100644 --- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala +++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala @@ -18,5 +18,9 @@ package org.apache.kyuubi.events package object handler { - type EventHandler[T <: KyuubiEvent] = T => Unit + trait EventHandler[T <: KyuubiEvent] extends AutoCloseable { + def apply(event: T): Unit + + def close(): Unit = {} + } } diff --git a/kyuubi-events/src/test/scala/org/apache/kyuubi/events/EventBusSuite.scala b/kyuubi-events/src/test/scala/org/apache/kyuubi/events/EventBusSuite.scala index 9c75766dacb..0a8563ee4f8 100644 --- a/kyuubi-events/src/test/scala/org/apache/kyuubi/events/EventBusSuite.scala +++ b/kyuubi-events/src/test/scala/org/apache/kyuubi/events/EventBusSuite.scala @@ -44,29 +44,29 @@ class EventBusSuite extends KyuubiFunSuite { } test("register event handler") { - var test0EventRecievedCount = 0 - var test1EventRecievedCount = 0 - var test2EventRecievedCount = 0 - var testEventRecievedCount = 0 + var test0EventReceivedCount = 0 + var test1EventReceivedCount = 0 + var test2EventReceivedCount = 0 + var testEventReceivedCount = 0 val liveBus = EventBus() liveBus.register[Test0KyuubiEvent] { e => assert(e.content == "test0") assert(e.eventType == "test0_kyuubi") - test0EventRecievedCount += 1 + test0EventReceivedCount += 1 } liveBus.register[Test1KyuubiEvent] { e => assert(e.content == "test1") assert(e.eventType == "test1_kyuubi") - test1EventRecievedCount += 1 + test1EventReceivedCount += 1 } // scribe subclass event liveBus.register[TestKyuubiEvent] { e => assert(e.eventType == "test2_kyuubi") - test2EventRecievedCount += 1 + test2EventReceivedCount += 1 } - liveBus.register[KyuubiEvent] { e => - testEventRecievedCount += 1 + liveBus.register[KyuubiEvent] { _ => + testEventReceivedCount += 1 } class Test0Handler extends EventHandler[Test0KyuubiEvent] { @@ -77,11 +77,9 @@ class EventBusSuite extends KyuubiFunSuite { liveBus.register[Test0KyuubiEvent](new Test0Handler) - liveBus.register[Test1KyuubiEvent](new EventHandler[Test1KyuubiEvent] { - override def apply(e: Test1KyuubiEvent): Unit = { - assert(e.content == "test1") - } - }) 
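// Aside (illustrative, not part of the patch): since EventHandler is now a trait extending
// AutoCloseable, a handler can own resources that deregisterAll() releases via close(); the new
// Kafka handler follows the same contract. A minimal sketch with a hypothetical PrintWriter-backed
// handler and placeholder Kafka settings:

import java.io.PrintWriter

import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.events.{EventBus, KyuubiEvent}
import org.apache.kyuubi.events.handler.{EventHandler, KafkaLoggingEventHandler}

// Hypothetical handler: writes every event as JSON and closes the writer when deregistered.
class FileAuditHandler(writer: PrintWriter) extends EventHandler[KyuubiEvent] {
  override def apply(event: KyuubiEvent): Unit = writer.println(event.toJson)
  override def close(): Unit = writer.close()
}

object EventHandlerSketch {
  def install(): EventBus = {
    val bus = EventBus()
    bus.register[KyuubiEvent](new FileAuditHandler(new PrintWriter("kyuubi-events.log")))
    bus.register[KyuubiEvent](new KafkaLoggingEventHandler(
      topic = "kyuubi-events",                                  // placeholder topic
      producerConf = Map("bootstrap.servers" -> "broker:9092"), // placeholder brokers
      kyuubiConf = KyuubiConf(),
      closeTimeoutInMs = 5000L))
    // bus.post(event) fans events out to both handlers; bus.deregisterAll() later closes them.
    bus
  }
}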
+ liveBus.register[Test1KyuubiEvent] { e => + assert(e.content == "test1") + } (1 to 10) foreach { _ => liveBus.post(Test0KyuubiEvent("test0")) @@ -92,10 +90,10 @@ class EventBusSuite extends KyuubiFunSuite { (1 to 30) foreach { _ => liveBus.post(Test2KyuubiEvent("name2", "test2")) } - assert(test0EventRecievedCount == 10) - assert(test1EventRecievedCount == 20) - assert(test2EventRecievedCount == 30) - assert(testEventRecievedCount == 60) + assert(test0EventReceivedCount == 10) + assert(test1EventReceivedCount == 20) + assert(test2EventReceivedCount == 30) + assert(testEventReceivedCount == 60) } test("register event handler for default bus") { @@ -120,7 +118,7 @@ class EventBusSuite extends KyuubiFunSuite { test("async event handler") { val countDownLatch = new CountDownLatch(4) - val count = new AtomicInteger(0); + val count = new AtomicInteger(0) class Test0Handler extends EventHandler[Test0KyuubiEvent] { override def apply(e: Test0KyuubiEvent): Unit = { Thread.sleep(10) diff --git a/kyuubi-ha/pom.xml b/kyuubi-ha/pom.xml index b4605b6a187..129f7a53dbb 100644 --- a/kyuubi-ha/pom.xml +++ b/kyuubi-ha/pom.xml @@ -21,11 +21,11 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT ../pom.xml - kyuubi-ha_2.12 + kyuubi-ha_${scala.binary.version} jar Kyuubi Project High Availability https://kyuubi.apache.org/ @@ -38,18 +38,8 @@ - org.apache.curator - curator-framework - - - - org.apache.curator - curator-recipes - - - - org.apache.zookeeper - zookeeper + org.apache.kyuubi + ${kyuubi-shaded-zookeeper.artifacts} @@ -99,6 +89,12 @@ grpc-stub + + com.dimafeng + testcontainers-scala-scalatest_${scala.binary.version} + test + + io.etcd jetcd-launcher diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/HighAvailabilityConf.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/HighAvailabilityConf.scala index 148a21e4dd3..6265570081d 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/HighAvailabilityConf.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/HighAvailabilityConf.scala @@ -79,7 +79,7 @@ object HighAvailabilityConf { s"${AuthTypes.values.mkString("
    • ", "
    • ", "
    ")}") .version("1.3.2") .stringConf - .checkValues(AuthTypes.values.map(_.toString)) + .checkValues(AuthTypes) .createWithDefault(AuthTypes.NONE.toString) val HA_ZK_ENGINE_AUTH_TYPE: ConfigEntry[String] = @@ -88,25 +88,36 @@ object HighAvailabilityConf { s"${AuthTypes.values.mkString("
    • ", "
    • ", "
    ")}") .version("1.3.2") .stringConf - .checkValues(AuthTypes.values.map(_.toString)) + .checkValues(AuthTypes) .createWithDefault(AuthTypes.NONE.toString) + val HA_ZK_AUTH_SERVER_PRINCIPAL: OptionalConfigEntry[String] = + buildConf("kyuubi.ha.zookeeper.auth.serverPrincipal") + .doc("Kerberos principal name of ZooKeeper Server. It only takes effect when " + + "Zookeeper client's version at least 3.5.7 or 3.6.0 or applies ZOOKEEPER-1467. " + + "To use Zookeeper 3.6 client, compile Kyuubi with `-Pzookeeper-3.6`.") + .version("1.8.0") + .stringConf + .createOptional + val HA_ZK_AUTH_PRINCIPAL: ConfigEntry[Option[String]] = buildConf("kyuubi.ha.zookeeper.auth.principal") - .doc("Name of the Kerberos principal is used for ZooKeeper authentication.") + .doc("Kerberos principal name that is used for ZooKeeper authentication.") .version("1.3.2") .fallbackConf(KyuubiConf.SERVER_PRINCIPAL) - val HA_ZK_AUTH_KEYTAB: ConfigEntry[Option[String]] = buildConf("kyuubi.ha.zookeeper.auth.keytab") - .doc("Location of the Kyuubi server's keytab is used for ZooKeeper authentication.") - .version("1.3.2") - .fallbackConf(KyuubiConf.SERVER_KEYTAB) + val HA_ZK_AUTH_KEYTAB: ConfigEntry[Option[String]] = + buildConf("kyuubi.ha.zookeeper.auth.keytab") + .doc("Location of the Kyuubi server's keytab that is used for ZooKeeper authentication.") + .version("1.3.2") + .fallbackConf(KyuubiConf.SERVER_KEYTAB) - val HA_ZK_AUTH_DIGEST: OptionalConfigEntry[String] = buildConf("kyuubi.ha.zookeeper.auth.digest") - .doc("The digest auth string is used for ZooKeeper authentication, like: username:password.") - .version("1.3.2") - .stringConf - .createOptional + val HA_ZK_AUTH_DIGEST: OptionalConfigEntry[String] = + buildConf("kyuubi.ha.zookeeper.auth.digest") + .doc("The digest auth string is used for ZooKeeper authentication, like: username:password.") + .version("1.3.2") + .stringConf + .createOptional val HA_ZK_CONN_MAX_RETRIES: ConfigEntry[Int] = buildConf("kyuubi.ha.zookeeper.connection.max.retries") @@ -149,7 +160,7 @@ object HighAvailabilityConf { s" ${RetryPolicies.values.mkString("
    • ", "
    • ", "
    ")}") .version("1.0.0") .stringConf - .checkValues(RetryPolicies.values.map(_.toString)) + .checkValues(RetryPolicies) .createWithDefault(RetryPolicies.EXPONENTIAL_BACKOFF.toString) val HA_ZK_NODE_TIMEOUT: ConfigEntry[Long] = @@ -209,14 +220,14 @@ object HighAvailabilityConf { .stringConf .createOptional - val HA_ETCD_SSL_CLINET_CRT_PATH: OptionalConfigEntry[String] = + val HA_ETCD_SSL_CLIENT_CRT_PATH: OptionalConfigEntry[String] = buildConf("kyuubi.ha.etcd.ssl.client.certificate.path") .doc("Where the etcd SSL certificate file is stored.") .version("1.6.0") .stringConf .createOptional - val HA_ETCD_SSL_CLINET_KEY_PATH: OptionalConfigEntry[String] = + val HA_ETCD_SSL_CLIENT_KEY_PATH: OptionalConfigEntry[String] = buildConf("kyuubi.ha.etcd.ssl.client.key.path") .doc("Where the etcd SSL key file is stored.") .version("1.6.0") diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/DiscoveryPaths.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/DiscoveryPaths.scala index 987a88ddafd..fe7ebe2ab86 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/DiscoveryPaths.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/DiscoveryPaths.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.ha.client -import org.apache.curator.utils.ZKPaths +import org.apache.kyuubi.shaded.curator.utils.ZKPaths object DiscoveryPaths { def makePath(parent: String, firstChild: String, restChildren: String*): String = { diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala index bdb9b12fe82..a1b1466d122 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala @@ -60,6 +60,7 @@ abstract class ServiceDiscovery( override def start(): Unit = { discoveryClient.registerService(conf, namespace, this) + info(s"Registered $name in namespace ${_namespace}.") super.start() } diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClient.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClient.scala index 80a70f2f218..d979804f417 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClient.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClient.scala @@ -74,10 +74,10 @@ class EtcdDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { } else { val caPath = conf.getOption(HA_ETCD_SSL_CA_PATH.key).getOrElse( throw new IllegalArgumentException(s"${HA_ETCD_SSL_CA_PATH.key} is not defined")) - val crtPath = conf.getOption(HA_ETCD_SSL_CLINET_CRT_PATH.key).getOrElse( - throw new IllegalArgumentException(s"${HA_ETCD_SSL_CLINET_CRT_PATH.key} is not defined")) - val keyPath = conf.getOption(HA_ETCD_SSL_CLINET_KEY_PATH.key).getOrElse( - throw new IllegalArgumentException(s"${HA_ETCD_SSL_CLINET_KEY_PATH.key} is not defined")) + val crtPath = conf.getOption(HA_ETCD_SSL_CLIENT_CRT_PATH.key).getOrElse( + throw new IllegalArgumentException(s"${HA_ETCD_SSL_CLIENT_CRT_PATH.key} is not defined")) + val keyPath = conf.getOption(HA_ETCD_SSL_CLIENT_KEY_PATH.key).getOrElse( + throw new IllegalArgumentException(s"${HA_ETCD_SSL_CLIENT_KEY_PATH.key} is not defined")) val context = GrpcSslContexts.forClient() .trustManager(new File(caPath)) @@ -358,11 +358,11 @@ class EtcdDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { client.getLeaseClient.keepAlive( 
leaseId, new StreamObserver[LeaseKeepAliveResponse] { - override def onNext(v: LeaseKeepAliveResponse): Unit = Unit // do nothing + override def onNext(v: LeaseKeepAliveResponse): Unit = () // do nothing - override def onError(throwable: Throwable): Unit = Unit // do nothing + override def onError(throwable: Throwable): Unit = () // do nothing - override def onCompleted(): Unit = Unit // do nothing + override def onCompleted(): Unit = () // do nothing }) client.getKVClient.put( ByteSequence.from(realPath.getBytes()), @@ -388,7 +388,7 @@ class EtcdDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { override def onError(throwable: Throwable): Unit = throw new KyuubiException(throwable.getMessage, throwable.getCause) - override def onCompleted(): Unit = Unit + override def onCompleted(): Unit = () } } diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperACLProvider.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperACLProvider.scala index 467c323b77e..87ea65c17a2 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperACLProvider.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperACLProvider.scala @@ -17,13 +17,12 @@ package org.apache.kyuubi.ha.client.zookeeper -import org.apache.curator.framework.api.ACLProvider -import org.apache.zookeeper.ZooDefs -import org.apache.zookeeper.data.ACL - import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.ha.client.AuthTypes +import org.apache.kyuubi.shaded.curator.framework.api.ACLProvider +import org.apache.kyuubi.shaded.zookeeper.ZooDefs +import org.apache.kyuubi.shaded.zookeeper.data.ACL class ZookeeperACLProvider(conf: KyuubiConf) extends ACLProvider { diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala index 8dd32d6b62b..d0749c8d923 100644 --- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala @@ -18,22 +18,23 @@ package org.apache.kyuubi.ha.client.zookeeper import java.io.{File, IOException} +import java.nio.charset.StandardCharsets import javax.security.auth.login.Configuration import scala.util.Random import com.google.common.annotations.VisibleForTesting -import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory} -import org.apache.curator.retry._ import org.apache.hadoop.security.UserGroupInformation -import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.JaasConfiguration import org.apache.kyuubi.Logging import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ha.HighAvailabilityConf._ import org.apache.kyuubi.ha.client.{AuthTypes, RetryPolicies} import org.apache.kyuubi.ha.client.RetryPolicies._ +import org.apache.kyuubi.shaded.curator.framework.{CuratorFramework, CuratorFrameworkFactory} +import org.apache.kyuubi.shaded.curator.retry._ import org.apache.kyuubi.util.KyuubiHadoopUtils +import org.apache.kyuubi.util.reflect.DynConstructors object ZookeeperClientProvider extends Logging { @@ -65,10 +66,8 @@ object ZookeeperClientProvider extends Logging { .aclProvider(new ZookeeperACLProvider(conf)) .retryPolicy(retryPolicy) - conf.get(HA_ZK_AUTH_DIGEST) match { - case Some(anthString) => - 
builder.authorization("digest", anthString.getBytes("UTF-8")) - case _ => + conf.get(HA_ZK_AUTH_DIGEST).foreach { authString => + builder.authorization("digest", authString.getBytes(StandardCharsets.UTF_8)) } builder.build() @@ -103,46 +102,51 @@ object ZookeeperClientProvider extends Logging { */ @throws[Exception] def setUpZooKeeperAuth(conf: KyuubiConf): Unit = { - def setupZkAuth(): Unit = { - val keyTabFile = getKeyTabFile(conf) - val maybePrincipal = conf.get(HA_ZK_AUTH_PRINCIPAL) - val kerberized = maybePrincipal.isDefined && keyTabFile.isDefined - if (UserGroupInformation.isSecurityEnabled && kerberized) { - if (!new File(keyTabFile.get).exists()) { - throw new IOException(s"${HA_ZK_AUTH_KEYTAB.key}: $keyTabFile does not exists") + def setupZkAuth(): Unit = (conf.get(HA_ZK_AUTH_PRINCIPAL), getKeyTabFile(conf)) match { + case (Some(principal), Some(keytab)) if UserGroupInformation.isSecurityEnabled => + if (!new File(keytab).exists()) { + throw new IOException(s"${HA_ZK_AUTH_KEYTAB.key}: $keytab does not exists") } System.setProperty("zookeeper.sasl.clientconfig", "KyuubiZooKeeperClient") - var principal = maybePrincipal.get - principal = KyuubiHadoopUtils.getServerPrincipal(principal) - val jaasConf = new JaasConfiguration("KyuubiZooKeeperClient", principal, keyTabFile.get) + conf.get(HA_ZK_AUTH_SERVER_PRINCIPAL).foreach { zkServerPrincipal => + // ZOOKEEPER-1467 allows configuring SPN in client + System.setProperty("zookeeper.server.principal", zkServerPrincipal) + } + val zkClientPrincipal = KyuubiHadoopUtils.getServerPrincipal(principal) + // HDFS-16591 makes breaking change on JaasConfiguration + val jaasConf = DynConstructors.builder() + .impl( // Hadoop 3.3.5 and above + "org.apache.hadoop.security.authentication.util.JaasConfiguration", + classOf[String], + classOf[String], + classOf[String]) + .impl( // Hadoop 3.3.4 and previous + // scalastyle:off + "org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager$JaasConfiguration", + // scalastyle:on + classOf[String], + classOf[String], + classOf[String]) + .build[Configuration]() + .newInstance("KyuubiZooKeeperClient", zkClientPrincipal, keytab) Configuration.setConfiguration(jaasConf) - } + case _ => } - if (conf.get(HA_ENGINE_REF_ID).isEmpty - && AuthTypes.withName(conf.get(HA_ZK_AUTH_TYPE)) == AuthTypes.KERBEROS) { + if (conf.get(HA_ENGINE_REF_ID).isEmpty && + AuthTypes.withName(conf.get(HA_ZK_AUTH_TYPE)) == AuthTypes.KERBEROS) { setupZkAuth() - } else if (conf.get(HA_ENGINE_REF_ID).nonEmpty && AuthTypes - .withName(conf.get(HA_ZK_ENGINE_AUTH_TYPE)) == AuthTypes.KERBEROS) { + } else if (conf.get(HA_ENGINE_REF_ID).nonEmpty && + AuthTypes.withName(conf.get(HA_ZK_ENGINE_AUTH_TYPE)) == AuthTypes.KERBEROS) { setupZkAuth() } - } @VisibleForTesting def getKeyTabFile(conf: KyuubiConf): Option[String] = { - val zkAuthKeytab = conf.get(HA_ZK_AUTH_KEYTAB) - if (zkAuthKeytab.isDefined) { - val zkAuthKeytabPath = zkAuthKeytab.get - val relativeFileName = new File(zkAuthKeytabPath).getName - if (new File(relativeFileName).exists()) { - Some(relativeFileName) - } else { - Some(zkAuthKeytabPath) - } - } else { - None + conf.get(HA_ZK_AUTH_KEYTAB).map { fullPath => + val filename = new File(fullPath).getName + if (new File(filename).exists()) filename else fullPath } } - } diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClient.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClient.scala index daa27047eb9..2db7d89d649 100644 --- 
a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClient.scala +++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClient.scala @@ -25,39 +25,25 @@ import java.util.concurrent.atomic.AtomicBoolean import scala.collection.JavaConverters._ import com.google.common.annotations.VisibleForTesting -import org.apache.curator.framework.CuratorFramework -import org.apache.curator.framework.recipes.atomic.{AtomicValue, DistributedAtomicInteger} -import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex -import org.apache.curator.framework.recipes.nodes.PersistentNode -import org.apache.curator.framework.state.ConnectionState -import org.apache.curator.framework.state.ConnectionState.CONNECTED -import org.apache.curator.framework.state.ConnectionState.LOST -import org.apache.curator.framework.state.ConnectionState.RECONNECTED -import org.apache.curator.framework.state.ConnectionStateListener -import org.apache.curator.retry.RetryForever -import org.apache.curator.utils.ZKPaths -import org.apache.zookeeper.CreateMode -import org.apache.zookeeper.CreateMode.PERSISTENT -import org.apache.zookeeper.KeeperException -import org.apache.zookeeper.KeeperException.NodeExistsException -import org.apache.zookeeper.WatchedEvent -import org.apache.zookeeper.Watcher - -import org.apache.kyuubi.KYUUBI_VERSION -import org.apache.kyuubi.KyuubiException -import org.apache.kyuubi.KyuubiSQLException -import org.apache.kyuubi.Logging + +import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiException, KyuubiSQLException, Logging} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_ENGINE_ID -import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ENGINE_REF_ID -import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ZK_NODE_TIMEOUT -import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ZK_PUBLISH_CONFIGS -import org.apache.kyuubi.ha.client.DiscoveryClient -import org.apache.kyuubi.ha.client.ServiceDiscovery -import org.apache.kyuubi.ha.client.ServiceNodeInfo -import org.apache.kyuubi.ha.client.zookeeper.ZookeeperClientProvider.buildZookeeperClient -import org.apache.kyuubi.ha.client.zookeeper.ZookeeperClientProvider.getGracefulStopThreadDelay +import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_ZK_NODE_TIMEOUT, HA_ZK_PUBLISH_CONFIGS} +import org.apache.kyuubi.ha.client.{DiscoveryClient, ServiceDiscovery, ServiceNodeInfo} +import org.apache.kyuubi.ha.client.zookeeper.ZookeeperClientProvider.{buildZookeeperClient, getGracefulStopThreadDelay} import org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient.connectionChecker +import org.apache.kyuubi.shaded.curator.framework.CuratorFramework +import org.apache.kyuubi.shaded.curator.framework.recipes.atomic.{AtomicValue, DistributedAtomicInteger} +import org.apache.kyuubi.shaded.curator.framework.recipes.locks.InterProcessSemaphoreMutex +import org.apache.kyuubi.shaded.curator.framework.recipes.nodes.PersistentNode +import org.apache.kyuubi.shaded.curator.framework.state.{ConnectionState, ConnectionStateListener} +import org.apache.kyuubi.shaded.curator.framework.state.ConnectionState.{CONNECTED, LOST, RECONNECTED} +import org.apache.kyuubi.shaded.curator.retry.RetryForever +import org.apache.kyuubi.shaded.curator.utils.ZKPaths +import org.apache.kyuubi.shaded.zookeeper.{CreateMode, KeeperException, WatchedEvent, Watcher} +import org.apache.kyuubi.shaded.zookeeper.CreateMode.PERSISTENT +import 
org.apache.kyuubi.shaded.zookeeper.KeeperException.NodeExistsException import org.apache.kyuubi.util.ThreadUtils class ZookeeperDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { @@ -226,7 +212,7 @@ class ZookeeperDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { info(s"Get service instance:$instance$engineIdStr and version:${version.getOrElse("")} " + s"under $namespace") ServiceNodeInfo(namespace, p, host, port, version, engineRefId, attributes) - } + }.toSeq } catch { case _: Exception if silent => Nil case e: Exception => @@ -305,6 +291,10 @@ class ZookeeperDiscoveryClient(conf: KyuubiConf) extends DiscoveryClient { basePath, initData.getBytes(StandardCharsets.UTF_8)) secretNode.start() + val znodeTimeout = conf.get(HA_ZK_NODE_TIMEOUT) + if (!secretNode.waitForInitialCreate(znodeTimeout, TimeUnit.MILLISECONDS)) { + throw new KyuubiException(s"Max znode creation wait time $znodeTimeout s exhausted") + } } override def getAndIncrement(path: String, delta: Int = 1): Int = { diff --git a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/DiscoveryClientTests.scala b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/DiscoveryClientTests.scala index 87db340b5fe..9caf3864640 100644 --- a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/DiscoveryClientTests.scala +++ b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/DiscoveryClientTests.scala @@ -135,17 +135,17 @@ trait DiscoveryClientTests extends KyuubiFunSuite { new Thread(() => { withDiscoveryClient(conf) { discoveryClient => - discoveryClient.tryWithLock(lockPath, 3000) { + discoveryClient.tryWithLock(lockPath, 10000) { lockLatch.countDown() - Thread.sleep(5000) + Thread.sleep(15000) } } }).start() withDiscoveryClient(conf) { discoveryClient => - assert(lockLatch.await(5000, TimeUnit.MILLISECONDS)) + assert(lockLatch.await(20000, TimeUnit.MILLISECONDS)) val e = intercept[KyuubiSQLException] { - discoveryClient.tryWithLock(lockPath, 2000) {} + discoveryClient.tryWithLock(lockPath, 5000) {} } assert(e.getMessage contains s"Timeout to lock on path [$lockPath]") } @@ -162,7 +162,7 @@ trait DiscoveryClientTests extends KyuubiFunSuite { test("setData method test") { withDiscoveryClient(conf) { discoveryClient => - val data = "abc"; + val data = "abc" val path = "/setData_test" discoveryClient.create(path, "PERSISTENT") discoveryClient.setData(path, data.getBytes) diff --git a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClientSuite.scala b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClientSuite.scala index 5b8855c1ee9..de48a3495db 100644 --- a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClientSuite.scala +++ b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/etcd/EtcdDiscoveryClientSuite.scala @@ -22,6 +22,9 @@ import java.nio.charset.StandardCharsets import scala.collection.JavaConverters._ import io.etcd.jetcd.launcher.{Etcd, EtcdCluster} +import org.scalactic.source.Position +import org.scalatest.Tag +import org.testcontainers.DockerClientFactory import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ADDRESSES, HA_CLIENT_CLASS} @@ -41,25 +44,38 @@ class EtcdDiscoveryClientSuite extends DiscoveryClientTests { var conf: KyuubiConf = KyuubiConf() .set(HA_CLIENT_CLASS, classOf[EtcdDiscoveryClient].getName) + private val hasDockerEnv = DockerClientFactory.instance().isDockerAvailable + override def beforeAll(): Unit = { - etcdCluster = new Etcd.Builder() - .withNodes(2) - .build() - 
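// Aside (illustrative, not part of the patch): the discovery API exercised above is normally
// driven through withDiscoveryClient; a minimal sketch that lists nodes under a placeholder
// namespace and takes the distributed lock:

import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient

object DiscoverySketch {
  // Assumes conf already carries the HA addresses and client class for ZooKeeper or etcd.
  def listAndLock(conf: KyuubiConf): Unit = withDiscoveryClient(conf) { client =>
    // "/kyuubi" is a placeholder namespace; each node exposes namespace, host, port and version.
    client.getServiceNodesInfo("/kyuubi").foreach { node =>
      println(s"${node.namespace} ${node.host}:${node.port} ${node.version.getOrElse("")}")
    }
    // Fails with KyuubiSQLException ("Timeout to lock on path ...") if the lock is not acquired in time.
    client.tryWithLock("/kyuubi_lock", 10000) {
      // critical section
    }
  }
}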
etcdCluster.start() - conf = new KyuubiConf() - .set(HA_CLIENT_CLASS, classOf[EtcdDiscoveryClient].getName) - .set(HA_ADDRESSES, getConnectString) + if (hasDockerEnv) { + etcdCluster = new Etcd.Builder() + .withNodes(2) + .build() + etcdCluster.start() + conf = new KyuubiConf() + .set(HA_CLIENT_CLASS, classOf[EtcdDiscoveryClient].getName) + .set(HA_ADDRESSES, getConnectString) + } super.beforeAll() } override def afterAll(): Unit = { super.afterAll() - if (etcdCluster != null) { + if (hasDockerEnv && etcdCluster != null) { etcdCluster.close() etcdCluster = null } } + override protected def test( + testName: String, + testTags: Tag*)(testFun: => Any)(implicit pos: Position): Unit = { + if (hasDockerEnv) { + super.test(testName, testTags: _*)(testFun) + } + // skip test + } + test("etcd test: set, get and delete") { withDiscoveryClient(conf) { discoveryClient => val path = "/kyuubi" diff --git a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala index bbd8b94ac7c..dd78e1fb8a0 100644 --- a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala +++ b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala @@ -25,11 +25,7 @@ import javax.security.auth.login.Configuration import scala.collection.JavaConverters._ -import org.apache.curator.framework.CuratorFrameworkFactory -import org.apache.curator.retry.ExponentialBackoffRetry import org.apache.hadoop.util.StringUtils -import org.apache.zookeeper.ZooDefs -import org.apache.zookeeper.data.ACL import org.scalatest.time.SpanSugar._ import org.apache.kyuubi.{KerberizedTestHelper, KYUUBI_VERSION} @@ -37,7 +33,13 @@ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ha.HighAvailabilityConf._ import org.apache.kyuubi.ha.client._ import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient +import org.apache.kyuubi.ha.client.zookeeper.ZookeeperClientProvider._ import org.apache.kyuubi.service._ +import org.apache.kyuubi.shaded.curator.framework.CuratorFrameworkFactory +import org.apache.kyuubi.shaded.curator.retry.ExponentialBackoffRetry +import org.apache.kyuubi.shaded.zookeeper.ZooDefs +import org.apache.kyuubi.shaded.zookeeper.data.ACL +import org.apache.kyuubi.util.reflect.ReflectUtils._ import org.apache.kyuubi.zookeeper.EmbeddedZookeeper import org.apache.kyuubi.zookeeper.ZookeeperConf.ZK_CLIENT_PORT @@ -117,7 +119,7 @@ abstract class ZookeeperDiscoveryClientSuite extends DiscoveryClientTests conf.set(HA_ZK_AUTH_PRINCIPAL.key, principal) conf.set(HA_ZK_AUTH_TYPE.key, AuthTypes.KERBEROS.toString) - ZookeeperClientProvider.setUpZooKeeperAuth(conf) + setUpZooKeeperAuth(conf) val configuration = Configuration.getConfiguration val entries = configuration.getAppConfigurationEntry("KyuubiZooKeeperClient") @@ -129,9 +131,9 @@ abstract class ZookeeperDiscoveryClientSuite extends DiscoveryClientTests assert(options("useKeyTab").toString.toBoolean) conf.set(HA_ZK_AUTH_KEYTAB.key, s"${keytab.getName}") - val e = intercept[IOException](ZookeeperClientProvider.setUpZooKeeperAuth(conf)) - assert(e.getMessage === - s"${HA_ZK_AUTH_KEYTAB.key}: ${ZookeeperClientProvider.getKeyTabFile(conf)} does not exists") + val e = intercept[IOException](setUpZooKeeperAuth(conf)) + assert( + e.getMessage === s"${HA_ZK_AUTH_KEYTAB.key}: ${getKeyTabFile(conf).get} does not exists") } } @@ -155,12 +157,11 @@ 
abstract class ZookeeperDiscoveryClientSuite extends DiscoveryClientTests assert(service.getServiceState === ServiceState.STARTED) stopZk() - val isServerLostM = discovery.getClass.getSuperclass.getDeclaredField("isServerLost") - isServerLostM.setAccessible(true) - val isServerLost = isServerLostM.get(discovery) + val isServerLost = + getField[AtomicBoolean]((discovery.getClass.getSuperclass, discovery), "isServerLost") eventually(timeout(10.seconds), interval(100.millis)) { - assert(isServerLost.asInstanceOf[AtomicBoolean].get()) + assert(isServerLost.get()) assert(discovery.getServiceState === ServiceState.STOPPED) assert(service.getServiceState === ServiceState.STOPPED) } diff --git a/kyuubi-hive-beeline/pom.xml b/kyuubi-hive-beeline/pom.xml index 15161624354..1068a81ce18 100644 --- a/kyuubi-hive-beeline/pom.xml +++ b/kyuubi-hive-beeline/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT kyuubi-hive-beeline @@ -40,6 +40,12 @@ ${project.version}
    + + org.apache.kyuubi + kyuubi-util + ${project.version} + + org.apache.hive hive-beeline @@ -115,6 +121,12 @@ commons-io + + org.mockito + mockito-core + test + + commons-lang commons-lang @@ -149,6 +161,11 @@ log4j-slf4j-impl + + org.slf4j + jul-to-slf4j + + org.apache.logging.log4j log4j-api @@ -211,6 +228,14 @@ true + + + org.apache.maven.plugins + maven-surefire-plugin + + ${skipTests} + + target/classes target/test-classes diff --git a/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiBeeLine.java b/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiBeeLine.java index 7ca7671486b..224cbb3ce11 100644 --- a/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiBeeLine.java +++ b/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiBeeLine.java @@ -19,22 +19,45 @@ import java.io.IOException; import java.io.InputStream; -import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.sql.Driver; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; +import java.util.*; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.kyuubi.util.reflect.DynConstructors; +import org.apache.kyuubi.util.reflect.DynFields; +import org.apache.kyuubi.util.reflect.DynMethods; public class KyuubiBeeLine extends BeeLine { + + static { + try { + // We use reflection here to handle the case where users remove the + // slf4j-to-jul bridge order to route their logs to JUL. + Class bridgeClass = Class.forName("org.slf4j.bridge.SLF4JBridgeHandler"); + bridgeClass.getMethod("removeHandlersForRootLogger").invoke(null); + boolean installed = (boolean) bridgeClass.getMethod("isInstalled").invoke(null); + if (!installed) { + bridgeClass.getMethod("install").invoke(null); + } + } catch (ReflectiveOperationException cnf) { + // can't log anything yet so just fail silently + } + } + public static final String KYUUBI_BEELINE_DEFAULT_JDBC_DRIVER = "org.apache.kyuubi.jdbc.KyuubiHiveDriver"; protected KyuubiCommands commands = new KyuubiCommands(this); - private Driver defaultDriver = null; + private Driver defaultDriver; + + // copied from org.apache.hive.beeline.BeeLine + private static final int ERRNO_OK = 0; + private static final int ERRNO_ARGS = 1; + private static final int ERRNO_OTHER = 2; + + private static final String PYTHON_MODE_PREFIX = "--python-mode"; + private boolean pythonMode = false; public KyuubiBeeLine() { this(true); @@ -44,25 +67,37 @@ public KyuubiBeeLine() { public KyuubiBeeLine(boolean isBeeLine) { super(isBeeLine); try { - Field commandsField = BeeLine.class.getDeclaredField("commands"); - commandsField.setAccessible(true); - commandsField.set(this, commands); + DynFields.builder().hiddenImpl(BeeLine.class, "commands").buildChecked(this).set(commands); } catch (Throwable t) { throw new ExceptionInInitializerError("Failed to inject kyuubi commands"); } try { defaultDriver = - (Driver) - Class.forName( - KYUUBI_BEELINE_DEFAULT_JDBC_DRIVER, - true, - Thread.currentThread().getContextClassLoader()) - .newInstance(); + DynConstructors.builder() + .impl(KYUUBI_BEELINE_DEFAULT_JDBC_DRIVER) + .buildChecked() + .newInstance(); } catch (Throwable t) { throw new ExceptionInInitializerError(KYUUBI_BEELINE_DEFAULT_JDBC_DRIVER + "-missing"); } } + @Override + void usage() { + super.usage(); + output("Usage: java \" + 
KyuubiBeeLine.class.getCanonicalName()"); + output(" --python-mode Execute python code/script."); + } + + public boolean isPythonMode() { + return pythonMode; + } + + // Visible for testing + public void setPythonMode(boolean pythonMode) { + this.pythonMode = pythonMode; + } + /** Starts the program. */ public static void main(String[] args) throws IOException { mainWithInputRedirection(args, null); @@ -115,25 +150,37 @@ int initArgs(String[] args) { BeelineParser beelineParser; boolean connSuccessful; boolean exit; - Field exitField; + DynFields.BoundField exitField; try { - Field optionsField = BeeLine.class.getDeclaredField("options"); - optionsField.setAccessible(true); - Options options = (Options) optionsField.get(this); + Options options = + DynFields.builder() + .hiddenImpl(BeeLine.class, "options") + .buildStaticChecked() + .get(); - beelineParser = new BeelineParser(); + beelineParser = + new BeelineParser() { + @SuppressWarnings("rawtypes") + @Override + protected void processOption(String arg, ListIterator iter) throws ParseException { + if (PYTHON_MODE_PREFIX.equals(arg)) { + pythonMode = true; + } else { + super.processOption(arg, iter); + } + } + }; cl = beelineParser.parse(options, args); - Method connectUsingArgsMethod = - BeeLine.class.getDeclaredMethod( - "connectUsingArgs", BeelineParser.class, CommandLine.class); - connectUsingArgsMethod.setAccessible(true); - connSuccessful = (boolean) connectUsingArgsMethod.invoke(this, beelineParser, cl); + connSuccessful = + DynMethods.builder("connectUsingArgs") + .hiddenImpl(BeeLine.class, BeelineParser.class, CommandLine.class) + .buildChecked(this) + .invoke(beelineParser, cl); - exitField = BeeLine.class.getDeclaredField("exit"); - exitField.setAccessible(true); - exit = (boolean) exitField.get(this); + exitField = DynFields.builder().hiddenImpl(BeeLine.class, "exit").buildChecked(this); + exit = exitField.get(); } catch (ParseException e1) { output(e1.getMessage()); @@ -149,10 +196,11 @@ int initArgs(String[] args) { // no-op if the file is not present if (!connSuccessful && !exit) { try { - Method defaultBeelineConnectMethod = - BeeLine.class.getDeclaredMethod("defaultBeelineConnect", CommandLine.class); - defaultBeelineConnectMethod.setAccessible(true); - connSuccessful = (boolean) defaultBeelineConnectMethod.invoke(this, cl); + connSuccessful = + DynMethods.builder("defaultBeelineConnect") + .hiddenImpl(BeeLine.class, CommandLine.class) + .buildChecked(this) + .invoke(cl); } catch (Exception t) { error(t.getMessage()); @@ -160,6 +208,11 @@ int initArgs(String[] args) { } } + // see HIVE-19048 : InitScript errors are ignored + if (exit) { + return 1; + } + int code = 0; if (cl.getOptionValues('e') != null) { commands = Arrays.asList(cl.getOptionValues('e')); @@ -175,8 +228,7 @@ int initArgs(String[] args) { return 1; } if (!commands.isEmpty()) { - for (Iterator i = commands.iterator(); i.hasNext(); ) { - String command = i.next().toString(); + for (String command : commands) { debug(loc("executing-command", command)); if (!dispatch(command)) { code++; @@ -184,7 +236,7 @@ int initArgs(String[] args) { } try { exit = true; - exitField.set(this, exit); + exitField.set(exit); } catch (Exception e) { error(e.getMessage()); return 1; @@ -192,4 +244,59 @@ int initArgs(String[] args) { } return code; } + + // see HIVE-19048 : Initscript errors are ignored + @Override + int runInit() { + String[] initFiles = getOpts().getInitFiles(); + + // executionResult will be ERRNO_OK only if all initFiles execute successfully + int 
executionResult = ERRNO_OK; + boolean exitOnError = !getOpts().getForce(); + DynFields.BoundField exitField = null; + + if (initFiles != null && initFiles.length != 0) { + for (String initFile : initFiles) { + info("Running init script " + initFile); + try { + int currentResult; + try { + currentResult = + DynMethods.builder("executeFile") + .hiddenImpl(BeeLine.class, String.class) + .buildChecked(this) + .invoke(initFile); + exitField = DynFields.builder().hiddenImpl(BeeLine.class, "exit").buildChecked(this); + } catch (Exception t) { + error(t.getMessage()); + currentResult = ERRNO_OTHER; + } + + if (currentResult != ERRNO_OK) { + executionResult = currentResult; + + if (exitOnError) { + return executionResult; + } + } + } finally { + // exit beeline if there is initScript failure and --force is not set + boolean exit = exitOnError && executionResult != ERRNO_OK; + try { + exitField.set(exit); + } catch (Exception t) { + error(t.getMessage()); + return ERRNO_OTHER; + } + } + } + } + return executionResult; + } + + // see HIVE-15820: comment at the head of beeline -e + @Override + boolean dispatch(String line) { + return super.dispatch(isPythonMode() ? line : HiveStringUtils.removeComments(line)); + } } diff --git a/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiCommands.java b/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiCommands.java index aaa32739acd..fcfee49edb0 100644 --- a/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiCommands.java +++ b/kyuubi-hive-beeline/src/main/java/org/apache/hive/beeline/KyuubiCommands.java @@ -19,10 +19,13 @@ import static org.apache.kyuubi.jdbc.hive.JdbcConnectionParams.*; +import com.google.common.annotations.VisibleForTesting; import java.io.*; +import java.nio.file.Files; import java.sql.*; import java.util.*; import org.apache.hive.beeline.logs.KyuubiBeelineInPlaceUpdateStream; +import org.apache.hive.common.util.HiveStringUtils; import org.apache.kyuubi.jdbc.hive.KyuubiStatement; import org.apache.kyuubi.jdbc.hive.Utils; import org.apache.kyuubi.jdbc.hive.logs.InPlaceUpdateStream; @@ -43,9 +46,14 @@ public boolean sql(String line) { return execute(line, false, false); } + /** For python mode, keep it as it is. */ + private String trimForNonPythonMode(String line) { + return beeLine.isPythonMode() ? line : line.trim(); + } + /** Extract and clean up the first command in the input. 
*/ private String getFirstCmd(String cmd, int length) { - return cmd.substring(length).trim(); + return trimForNonPythonMode(cmd.substring(length)); } private String[] tokenizeCmd(String cmd) { @@ -79,10 +87,9 @@ private boolean sourceFile(String cmd) { } private boolean sourceFileInternal(File sourceFile) throws IOException { - BufferedReader reader = null; - try { - reader = new BufferedReader(new FileReader(sourceFile)); - String lines = null, extra; + try (BufferedReader reader = Files.newBufferedReader(sourceFile.toPath())) { + String lines = null; + String extra; while ((extra = reader.readLine()) != null) { if (beeLine.isComment(extra)) { continue; @@ -93,16 +100,13 @@ private boolean sourceFileInternal(File sourceFile) throws IOException { lines += "\n" + extra; } } - String[] cmds = lines.split(";"); + String[] cmds = lines.split(beeLine.getOpts().getDelimiter()); for (String c : cmds) { + c = trimForNonPythonMode(c); if (!executeInternal(c, false)) { return false; } } - } finally { - if (reader != null) { - reader.close(); - } } return true; } @@ -258,9 +262,10 @@ private boolean execute(String line, boolean call, boolean entireLineAsCommand) beeLine.handleException(e); } + line = trimForNonPythonMode(line); List cmdList = getCmdList(line, entireLineAsCommand); for (int i = 0; i < cmdList.size(); i++) { - String sql = cmdList.get(i); + String sql = trimForNonPythonMode(cmdList.get(i)); if (sql.length() != 0) { if (!executeInternal(sql, call)) { return false; @@ -276,7 +281,8 @@ private boolean execute(String line, boolean call, boolean entireLineAsCommand) * quotations. It iterates through each character in the line and checks to see if it is a ;, ', * or " */ - private List getCmdList(String line, boolean entireLineAsCommand) { + @VisibleForTesting + public List getCmdList(String line, boolean entireLineAsCommand) { List cmdList = new ArrayList(); if (entireLineAsCommand) { cmdList.add(line); @@ -352,7 +358,7 @@ private List getCmdList(String line, boolean entireLineAsCommand) { */ private void addCmdPart(List cmdList, StringBuilder command, String cmdpart) { if (cmdpart.endsWith("\\")) { - command.append(cmdpart.substring(0, cmdpart.length() - 1)).append(";"); + command.append(cmdpart, 0, cmdpart.length() - 1).append(";"); return; } else { command.append(cmdpart); @@ -417,6 +423,7 @@ private String getProperty(Properties props, String[] keys) { return null; } + @Override public boolean connect(Properties props) throws IOException { String url = getProperty( @@ -462,7 +469,7 @@ public boolean connect(Properties props) throws IOException { beeLine.info("Connecting to " + url); if (Utils.parsePropertyFromUrl(url, AUTH_PRINCIPAL) == null - || Utils.parsePropertyFromUrl(url, AUTH_KYUUBI_SERVER_PRINCIPAL) == null) { + && Utils.parsePropertyFromUrl(url, AUTH_KYUUBI_SERVER_PRINCIPAL) == null) { String urlForPrompt = url.substring(0, url.contains(";") ? 
url.indexOf(';') : url.length()); if (username == null) { username = beeLine.getConsoleReader().readLine("Enter username for " + urlForPrompt + ": "); @@ -484,7 +491,19 @@ public boolean connect(Properties props) throws IOException { if (!beeLine.isBeeLine()) { beeLine.updateOptsForCli(); } - beeLine.runInit(); + + // see HIVE-19048 : Initscript errors are ignored + int initScriptExecutionResult = beeLine.runInit(); + + // if execution of the init script(s) return anything other than ERRNO_OK from beeline + // exit beeline with error unless --force is set + if (initScriptExecutionResult != 0 && !beeLine.getOpts().getForce()) { + return beeLine.error("init script execution failed."); + } + + if (beeLine.getOpts().getInitFiles() != null) { + beeLine.initializeConsoleReader(null); + } beeLine.setCompletions(); beeLine.getOpts().setLastConnectedUrl(url); @@ -499,12 +518,14 @@ public boolean connect(Properties props) throws IOException { @Override public String handleMultiLineCmd(String line) throws IOException { - int[] startQuote = {-1}; Character mask = (System.getProperty("jline.terminal", "").equals("jline.UnsupportedTerminal")) ? null : jline.console.ConsoleReader.NULL_MASK; + if (!beeLine.isPythonMode()) { + line = HiveStringUtils.removeComments(line); + } while (isMultiLine(line) && beeLine.getOpts().isAllowMultiLineCommand()) { StringBuilder prompt = new StringBuilder(beeLine.getPrompt()); if (!beeLine.getOpts().isSilent()) { @@ -530,6 +551,9 @@ public String handleMultiLineCmd(String line) throws IOException { if (extra == null) { // it happens when using -f and the line of cmds does not end with ; break; } + if (!beeLine.isPythonMode()) { + extra = HiveStringUtils.removeComments(extra); + } if (!extra.isEmpty()) { line += "\n" + extra; } @@ -541,12 +565,13 @@ public String handleMultiLineCmd(String line) throws IOException { // console. 
Used in handleMultiLineCmd method assumes line would never be null when this method is // called private boolean isMultiLine(String line) { + line = trimForNonPythonMode(line); if (line.endsWith(beeLine.getOpts().getDelimiter()) || beeLine.isComment(line)) { return false; } // handles the case like line = show tables; --test comment List cmds = getCmdList(line, false); - return cmds.isEmpty() || !cmds.get(cmds.size() - 1).startsWith("--"); + return cmds.isEmpty() || !trimForNonPythonMode(cmds.get(cmds.size() - 1)).startsWith("--"); } static class KyuubiLogRunnable implements Runnable { diff --git a/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiBeeLineTest.java b/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiBeeLineTest.java index b144c95c61f..9c7aec35a42 100644 --- a/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiBeeLineTest.java +++ b/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiBeeLineTest.java @@ -19,7 +19,12 @@ package org.apache.hive.beeline; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import org.apache.kyuubi.util.reflect.DynFields; import org.junit.Test; public class KyuubiBeeLineTest { @@ -29,4 +34,104 @@ public void testKyuubiBeelineWithoutArgs() { int result = kyuubiBeeLine.initArgs(new String[0]); assertEquals(0, result); } + + @Test + public void testKyuubiBeelineExitCodeWithoutConnection() { + KyuubiBeeLine kyuubiBeeLine = new KyuubiBeeLine(); + String scriptFile = getClass().getClassLoader().getResource("test.sql").getFile(); + + String[] args1 = {"-u", "badUrl", "-e", "show tables"}; + int result1 = kyuubiBeeLine.initArgs(args1); + assertEquals(1, result1); + + String[] args2 = {"-u", "badUrl", "-f", scriptFile}; + int result2 = kyuubiBeeLine.initArgs(args2); + assertEquals(1, result2); + + String[] args3 = {"-u", "badUrl", "-i", scriptFile}; + int result3 = kyuubiBeeLine.initArgs(args3); + assertEquals(1, result3); + } + + @Test + public void testKyuubiBeeLineCmdUsage() { + BufferPrintStream printStream = new BufferPrintStream(); + + KyuubiBeeLine kyuubiBeeLine = new KyuubiBeeLine(); + DynFields.builder() + .hiddenImpl(BeeLine.class, "outputStream") + .build(kyuubiBeeLine) + .set(printStream); + String[] args1 = {"-h"}; + kyuubiBeeLine.initArgs(args1); + String output = printStream.getOutput(); + assert output.contains("--python-mode Execute python code/script."); + } + + @Test + public void testKyuubiBeeLinePythonMode() { + KyuubiBeeLine kyuubiBeeLine = new KyuubiBeeLine(); + String[] args1 = {"-u", "badUrl", "--python-mode"}; + kyuubiBeeLine.initArgs(args1); + assertTrue(kyuubiBeeLine.isPythonMode()); + kyuubiBeeLine.setPythonMode(false); + + String[] args2 = {"--python-mode", "-f", "test.sql"}; + kyuubiBeeLine.initArgs(args2); + assertTrue(kyuubiBeeLine.isPythonMode()); + assert kyuubiBeeLine.getOpts().getScriptFile().equals("test.sql"); + kyuubiBeeLine.setPythonMode(false); + + String[] args3 = {"-u", "badUrl"}; + kyuubiBeeLine.initArgs(args3); + assertTrue(!kyuubiBeeLine.isPythonMode()); + kyuubiBeeLine.setPythonMode(false); + } + + @Test + public void testKyuubiBeelineComment() { + KyuubiBeeLine kyuubiBeeLine = new KyuubiBeeLine(); + int result = kyuubiBeeLine.initArgsFromCliVars(new String[] {"-e", "--comment show database;"}); + assertEquals(0, result); + result = kyuubiBeeLine.initArgsFromCliVars(new String[] {"-e", "--comment\n show database;"}); + 
assertEquals(1, result); + result = + kyuubiBeeLine.initArgsFromCliVars( + new String[] {"-e", "--comment line 1 \n --comment line 2 \n show database;"}); + assertEquals(1, result); + } + + static class BufferPrintStream extends PrintStream { + public StringBuilder stringBuilder = new StringBuilder(); + + static OutputStream noOpOutputStream = + new OutputStream() { + @Override + public void write(int b) throws IOException { + // do nothing + } + }; + + public BufferPrintStream() { + super(noOpOutputStream); + } + + public BufferPrintStream(OutputStream outputStream) { + super(noOpOutputStream); + } + + @Override + public void println(String x) { + stringBuilder.append(x).append("\n"); + } + + @Override + public void print(String x) { + stringBuilder.append(x); + } + + public String getOutput() { + return stringBuilder.toString(); + } + } } diff --git a/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiCommandsTest.java b/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiCommandsTest.java new file mode 100644 index 00000000000..653d1b08f55 --- /dev/null +++ b/kyuubi-hive-beeline/src/test/java/org/apache/hive/beeline/KyuubiCommandsTest.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hive.beeline; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.List; +import jline.console.ConsoleReader; +import org.junit.Test; +import org.mockito.Mockito; + +public class KyuubiCommandsTest { + @Test + public void testParsePythonSnippets() throws IOException { + ConsoleReader reader = Mockito.mock(ConsoleReader.class); + String pythonSnippets = "for i in [1, 2, 3]:\n" + " print(i)\n"; + Mockito.when(reader.readLine()).thenReturn(pythonSnippets); + + KyuubiBeeLine beeline = new KyuubiBeeLine(); + beeline.setPythonMode(true); + beeline.setConsoleReader(reader); + KyuubiCommands commands = new KyuubiCommands(beeline); + String line = commands.handleMultiLineCmd(pythonSnippets); + + List cmdList = commands.getCmdList(line, false); + assertEquals(cmdList.size(), 1); + assertEquals(cmdList.get(0), pythonSnippets); + } + + @Test + public void testHandleMultiLineCmd() throws IOException { + ConsoleReader reader = Mockito.mock(ConsoleReader.class); + String snippets = "select 1;--comments1\nselect 2;--comments2"; + Mockito.when(reader.readLine()).thenReturn(snippets); + + KyuubiBeeLine beeline = new KyuubiBeeLine(); + beeline.setConsoleReader(reader); + beeline.setPythonMode(false); + KyuubiCommands commands = new KyuubiCommands(beeline); + String line = commands.handleMultiLineCmd(snippets); + List cmdList = commands.getCmdList(line, false); + assertEquals(cmdList.size(), 2); + assertEquals(cmdList.get(0), "select 1"); + assertEquals(cmdList.get(1), "\nselect 2"); + + // see HIVE-15820: comment at the head of beeline -e + snippets = "--comments1\nselect 2;--comments2"; + Mockito.when(reader.readLine()).thenReturn(snippets); + line = commands.handleMultiLineCmd(snippets); + cmdList = commands.getCmdList(line, false); + assertEquals(cmdList.size(), 1); + assertEquals(cmdList.get(0), "select 2"); + } +} diff --git a/kyuubi-hive-beeline/src/test/resources/test.sql b/kyuubi-hive-beeline/src/test/resources/test.sql new file mode 100644 index 00000000000..c7c3ee2f92b --- /dev/null +++ b/kyuubi-hive-beeline/src/test/resources/test.sql @@ -0,0 +1,17 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +show tables; diff --git a/kyuubi-hive-jdbc-shaded/pom.xml b/kyuubi-hive-jdbc-shaded/pom.xml index 1a6f258b02f..174f199bead 100644 --- a/kyuubi-hive-jdbc-shaded/pom.xml +++ b/kyuubi-hive-jdbc-shaded/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT kyuubi-hive-jdbc-shaded @@ -108,10 +108,6 @@ org.apache.commons ${kyuubi.shade.packageName}.org.apache.commons - - org.apache.curator - ${kyuubi.shade.packageName}.org.apache.curator - org.apache.hive ${kyuubi.shade.packageName}.org.apache.hive @@ -120,18 +116,10 @@ org.apache.http ${kyuubi.shade.packageName}.org.apache.http - - org.apache.jute - ${kyuubi.shade.packageName}.org.apache.jute - org.apache.thrift ${kyuubi.shade.packageName}.org.apache.thrift - - org.apache.zookeeper - ${kyuubi.shade.packageName}.org.apache.zookeeper - diff --git a/kyuubi-hive-jdbc/pom.xml b/kyuubi-hive-jdbc/pom.xml index 36ea7acc274..aa5e7c161d5 100644 --- a/kyuubi-hive-jdbc/pom.xml +++ b/kyuubi-hive-jdbc/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT kyuubi-hive-jdbc @@ -35,6 +35,11 @@ + + org.apache.kyuubi + kyuubi-util + ${project.version} + org.apache.arrow @@ -102,24 +107,14 @@ provided - - org.apache.curator - curator-framework - - - - org.apache.curator - curator-client - - org.apache.httpcomponents httpclient - org.apache.zookeeper - zookeeper + org.apache.kyuubi + ${kyuubi-shaded-zookeeper.artifacts} diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/KyuubiHiveDriver.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/KyuubiHiveDriver.java index 3b874ba2e3a..66b797087e5 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/KyuubiHiveDriver.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/KyuubiHiveDriver.java @@ -24,6 +24,7 @@ import java.util.jar.Attributes; import java.util.jar.Manifest; import java.util.logging.Logger; +import org.apache.commons.lang3.StringUtils; import org.apache.kyuubi.jdbc.hive.JdbcConnectionParams; import org.apache.kyuubi.jdbc.hive.KyuubiConnection; import org.apache.kyuubi.jdbc.hive.KyuubiSQLException; @@ -137,7 +138,7 @@ private Properties parseURLForPropertyInfo(String url, Properties defaults) thro host = ""; } String port = Integer.toString(params.getPort()); - if (host.equals("")) { + if (StringUtils.isEmpty(host)) { port = ""; } else if (port.equals("0") || port.equals("-1")) { port = DEFAULT_PORT; diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiArrowQueryResultSet.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiArrowQueryResultSet.java index fda70f463e9..54491b2d670 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiArrowQueryResultSet.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiArrowQueryResultSet.java @@ -250,9 +250,6 @@ private void retrieveSchema() throws SQLException { metadataResp = client.GetResultSetMetadata(metadataReq); Utils.verifySuccess(metadataResp.getStatus()); - StringBuilder namesSb = new StringBuilder(); - StringBuilder typesSb = new StringBuilder(); - TTableSchema schema = metadataResp.getSchema(); if (schema == null || !schema.isSetColumns()) { // TODO: should probably throw an exception here. 
@@ -262,10 +259,6 @@ private void retrieveSchema() throws SQLException { List columns = schema.getColumns(); for (int pos = 0; pos < schema.getColumnsSize(); pos++) { - if (pos != 0) { - namesSb.append(","); - typesSb.append(","); - } String columnName = columns.get(pos).getColumnName(); columnNames.add(columnName); normalizedColumnNames.add(columnName.toLowerCase()); diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiConnection.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiConnection.java index 0932ea56585..c23985328ec 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiConnection.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiConnection.java @@ -30,10 +30,7 @@ import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.KeyStore; -import java.security.SecureRandom; +import java.security.*; import java.sql.*; import java.util.*; import java.util.Map.Entry; @@ -43,6 +40,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.security.auth.Subject; import javax.security.sasl.Sasl; +import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hive.service.rpc.thrift.*; import org.apache.http.HttpRequestInterceptor; @@ -106,9 +104,11 @@ public class KyuubiConnection implements SQLConnection, KyuubiLoggable { private Thread engineLogThread; private boolean engineLogInflight = true; private volatile boolean launchEngineOpCompleted = false; + private boolean launchEngineOpSupportResult = false; private String engineId = ""; private String engineName = ""; private String engineUrl = ""; + private String engineRefId = ""; private boolean isBeeLineMode; @@ -771,6 +771,10 @@ private void openSession() throws SQLException { String launchEngineOpHandleSecret = openRespConf.get("kyuubi.session.engine.launch.handle.secret"); + launchEngineOpSupportResult = + Boolean.parseBoolean( + openRespConf.getOrDefault("kyuubi.session.engine.launch.support.result", "false")); + if (launchEngineOpHandleGuid != null && launchEngineOpHandleSecret != null) { try { byte[] guidBytes = Base64.getMimeDecoder().decode(launchEngineOpHandleGuid); @@ -813,11 +817,16 @@ private boolean isSaslAuthMode() { return !AUTH_SIMPLE.equalsIgnoreCase(sessConfMap.get(AUTH_TYPE)); } - private boolean isFromSubjectAuthMode() { - return isSaslAuthMode() - && hasSessionValue(AUTH_PRINCIPAL) - && AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equalsIgnoreCase( - sessConfMap.get(AUTH_KERBEROS_AUTH_TYPE)); + private boolean isHadoopUserGroupInformationDoAs() { + try { + @SuppressWarnings("unchecked") + Class HadoopUserClz = + (Class) ClassUtils.getClass("org.apache.hadoop.security.User"); + Subject subject = Subject.getSubject(AccessController.getContext()); + return subject != null && !subject.getPrincipals(HadoopUserClz).isEmpty(); + } catch (ClassNotFoundException e) { + return false; + } } private boolean isKeytabAuthMode() { @@ -827,6 +836,16 @@ && hasSessionValue(AUTH_KYUUBI_CLIENT_PRINCIPAL) && hasSessionValue(AUTH_KYUUBI_CLIENT_KEYTAB); } + private boolean isFromSubjectAuthMode() { + return isSaslAuthMode() + && hasSessionValue(AUTH_PRINCIPAL) + && !hasSessionValue(AUTH_KYUUBI_CLIENT_PRINCIPAL) + && !hasSessionValue(AUTH_KYUUBI_CLIENT_KEYTAB) + && (AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equalsIgnoreCase( + 
sessConfMap.get(AUTH_KERBEROS_AUTH_TYPE)) + || isHadoopUserGroupInformationDoAs()); + } + private boolean isTgtCacheAuthMode() { return isSaslAuthMode() && hasSessionValue(AUTH_PRINCIPAL) @@ -843,15 +862,15 @@ private boolean isKerberosAuthMode() { } private Subject createSubject() { - if (isFromSubjectAuthMode()) { + if (isKeytabAuthMode()) { + String principal = sessConfMap.get(AUTH_KYUUBI_CLIENT_PRINCIPAL); + String keytab = sessConfMap.get(AUTH_KYUUBI_CLIENT_KEYTAB); + return KerberosAuthenticationManager.getKeytabAuthentication(principal, keytab).getSubject(); + } else if (isFromSubjectAuthMode()) { AccessControlContext context = AccessController.getContext(); return Subject.getSubject(context); } else if (isTgtCacheAuthMode()) { return KerberosAuthenticationManager.getTgtCacheAuthentication().getSubject(); - } else if (isKeytabAuthMode()) { - String principal = sessConfMap.get(AUTH_KYUUBI_CLIENT_PRINCIPAL); - String keytab = sessConfMap.get(AUTH_KYUUBI_CLIENT_KEYTAB); - return KerberosAuthenticationManager.getKeytabAuthentication(principal, keytab).getSubject(); } else { // This should never happen throw new IllegalArgumentException("Unsupported auth mode"); @@ -1339,7 +1358,7 @@ public void waitLaunchEngineToComplete() throws SQLException { } private void fetchLaunchEngineResult() { - if (launchEngineOpHandle == null) return; + if (launchEngineOpHandle == null || !launchEngineOpSupportResult) return; TFetchResultsReq tFetchResultsReq = new TFetchResultsReq( @@ -1357,6 +1376,8 @@ private void fetchLaunchEngineResult() { engineName = value; } else if ("url".equals(key)) { engineUrl = value; + } else if ("refId".equals(key)) { + engineRefId = value; } } } catch (Exception e) { @@ -1375,4 +1396,8 @@ public String getEngineName() { public String getEngineUrl() { return engineUrl; } + + public String getEngineRefId() { + return engineRefId; + } } diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiDatabaseMetaData.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiDatabaseMetaData.java index f5e29f8e7d6..c6ab3a277c4 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiDatabaseMetaData.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiDatabaseMetaData.java @@ -531,7 +531,7 @@ public ResultSet getProcedureColumns( @Override public String getProcedureTerm() throws SQLException { - return new String("UDF"); + return "UDF"; } @Override diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiPreparedStatement.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiPreparedStatement.java index 43c2a030bc8..1e53f940157 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiPreparedStatement.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiPreparedStatement.java @@ -26,9 +26,7 @@ import java.sql.Timestamp; import java.sql.Types; import java.text.MessageFormat; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Scanner; import org.apache.hive.service.rpc.thrift.TCLIService; import org.apache.hive.service.rpc.thrift.TSessionHandle; @@ -81,57 +79,7 @@ public int executeUpdate() throws SQLException { /** update the SQL string with parameters set by setXXX methods of {@link PreparedStatement} */ private String updateSql(final String sql, HashMap parameters) throws SQLException { - List parts = splitSqlStatement(sql); - - StringBuilder newSql = new StringBuilder(parts.get(0)); 
- for (int i = 1; i < parts.size(); i++) { - if (!parameters.containsKey(i)) { - throw new KyuubiSQLException("Parameter #" + i + " is unset"); - } - newSql.append(parameters.get(i)); - newSql.append(parts.get(i)); - } - return newSql.toString(); - } - - /** - * Splits the parametered sql statement at parameter boundaries. - * - *

    taking into account ' and \ escaping. - * - *

    output for: 'select 1 from ? where a = ?' ['select 1 from ',' where a = ',''] - */ - private List splitSqlStatement(String sql) { - List parts = new ArrayList<>(); - int apCount = 0; - int off = 0; - boolean skip = false; - - for (int i = 0; i < sql.length(); i++) { - char c = sql.charAt(i); - if (skip) { - skip = false; - continue; - } - switch (c) { - case '\'': - apCount++; - break; - case '\\': - skip = true; - break; - case '?': - if ((apCount & 1) == 0) { - parts.add(sql.substring(off, i)); - off = i + 1; - } - break; - default: - break; - } - } - parts.add(sql.substring(off, sql.length())); - return parts; + return Utils.updateSql(sql, parameters); } @Override @@ -220,7 +168,7 @@ public void setObject(int parameterIndex, Object x) throws SQLException { // Can't infer a type. throw new KyuubiSQLException( MessageFormat.format( - "Can't infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.", + "Cannot infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.", x.getClass().getName())); } } diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiQueryResultSet.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiQueryResultSet.java index f06ada5d4be..242ec772021 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiQueryResultSet.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiQueryResultSet.java @@ -26,6 +26,7 @@ import org.apache.kyuubi.jdbc.hive.cli.RowSet; import org.apache.kyuubi.jdbc.hive.cli.RowSetFactory; import org.apache.kyuubi.jdbc.hive.common.HiveDecimal; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +48,7 @@ public class KyuubiQueryResultSet extends KyuubiBaseResultSet { private boolean emptyResultSet = false; private boolean isScrollable = false; private boolean fetchFirst = false; + private boolean hasMoreToFetch = false; private final TProtocolVersion protocol; @@ -223,9 +225,6 @@ private void retrieveSchema() throws SQLException { metadataResp = client.GetResultSetMetadata(metadataReq); Utils.verifySuccess(metadataResp.getStatus()); - StringBuilder namesSb = new StringBuilder(); - StringBuilder typesSb = new StringBuilder(); - TTableSchema schema = metadataResp.getSchema(); if (schema == null || !schema.isSetColumns()) { // TODO: should probably throw an exception here. 
@@ -235,10 +234,6 @@ private void retrieveSchema() throws SQLException { List columns = schema.getColumns(); for (int pos = 0; pos < schema.getColumnsSize(); pos++) { - if (pos != 0) { - namesSb.append(","); - typesSb.append(","); - } String columnName = columns.get(pos).getColumnName(); columnNames.add(columnName); normalizedColumnNames.add(columnName.toLowerCase()); @@ -324,25 +319,20 @@ public boolean next() throws SQLException { try { TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT; if (fetchFirst) { - // If we are asked to start from begining, clear the current fetched resultset + // If we are asked to start from beginning, clear the current fetched resultset orientation = TFetchOrientation.FETCH_FIRST; fetchedRows = null; fetchedRowsItr = null; fetchFirst = false; } if (fetchedRows == null || !fetchedRowsItr.hasNext()) { - TFetchResultsReq fetchReq = new TFetchResultsReq(stmtHandle, orientation, fetchSize); - TFetchResultsResp fetchResp; - fetchResp = client.FetchResults(fetchReq); - Utils.verifySuccessWithInfo(fetchResp.getStatus()); - - TRowSet results = fetchResp.getResults(); - fetchedRows = RowSetFactory.create(results, protocol); - fetchedRowsItr = fetchedRows.iterator(); + fetchResult(orientation); } if (fetchedRowsItr.hasNext()) { row = fetchedRowsItr.next(); + } else if (hasMoreToFetch) { + fetchResult(orientation); } else { return false; } @@ -357,6 +347,18 @@ public boolean next() throws SQLException { return true; } + private void fetchResult(TFetchOrientation orientation) throws SQLException, TException { + TFetchResultsReq fetchReq = new TFetchResultsReq(stmtHandle, orientation, fetchSize); + TFetchResultsResp fetchResp; + fetchResp = client.FetchResults(fetchReq); + Utils.verifySuccessWithInfo(fetchResp.getStatus()); + hasMoreToFetch = fetchResp.isSetHasMoreRows() && fetchResp.isHasMoreRows(); + + TRowSet results = fetchResp.getResults(); + fetchedRows = RowSetFactory.create(results, protocol); + fetchedRowsItr = fetchedRows.iterator(); + } + @Override public ResultSetMetaData getMetaData() throws SQLException { if (isClosed) { diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiSQLException.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiSQLException.java index 1ac0adf04ac..7d26f807898 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiSQLException.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/KyuubiSQLException.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.hive.service.rpc.thrift.TStatus; +import org.apache.kyuubi.util.reflect.DynConstructors; public class KyuubiSQLException extends SQLException { @@ -186,7 +187,10 @@ private static Throwable toStackTrace( private static Throwable newInstance(String className, String message) { try { - return (Throwable) Class.forName(className).getConstructor(String.class).newInstance(message); + return DynConstructors.builder() + .impl(className, String.class) + .buildChecked() + .newInstance(message); } catch (Exception e) { return new RuntimeException(className + ":" + message); } diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/Utils.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/Utils.java index 1daa322ecd2..d0167e3e490 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/Utils.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/Utils.java @@ -22,6 +22,7 @@ import java.net.InetAddress; import 
java.net.URI; import java.net.UnknownHostException; +import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.*; import java.util.regex.Matcher; @@ -89,6 +90,62 @@ static void verifySuccess(TStatus status, boolean withInfo) throws SQLException throw new KyuubiSQLException(status); } + /** + * Splits the parametered sql statement at parameter boundaries. + * + *

    taking into account ' and \ escaping. + * + *

    output for: 'select 1 from ? where a = ?' ['select 1 from ',' where a = ',''] + */ + static List splitSqlStatement(String sql) { + List parts = new ArrayList<>(); + int apCount = 0; + int off = 0; + boolean skip = false; + + for (int i = 0; i < sql.length(); i++) { + char c = sql.charAt(i); + if (skip) { + skip = false; + continue; + } + switch (c) { + case '\'': + apCount++; + break; + case '\\': + skip = true; + break; + case '?': + if ((apCount & 1) == 0) { + parts.add(sql.substring(off, i)); + off = i + 1; + } + break; + default: + break; + } + } + parts.add(sql.substring(off)); + return parts; + } + + /** update the SQL string with parameters set by setXXX methods of {@link PreparedStatement} */ + public static String updateSql(final String sql, HashMap parameters) + throws SQLException { + List parts = splitSqlStatement(sql); + + StringBuilder newSql = new StringBuilder(parts.get(0)); + for (int i = 1; i < parts.size(); i++) { + if (!parameters.containsKey(i)) { + throw new KyuubiSQLException("Parameter #" + i + " is unset"); + } + newSql.append(parameters.get(i)); + newSql.append(parts.get(i)); + } + return newSql.toString(); + } + public static JdbcConnectionParams parseURL(String uri) throws JdbcUriParseException, SQLException, ZooKeeperHiveClientException { return parseURL(uri, new Properties()); @@ -494,7 +551,10 @@ public static synchronized String getVersion() { if (KYUUBI_CLIENT_VERSION == null) { try { Properties prop = new Properties(); - prop.load(Utils.class.getClassLoader().getResourceAsStream("version.properties")); + prop.load( + Utils.class + .getClassLoader() + .getResourceAsStream("org/apache/kyuubi/version.properties")); KYUUBI_CLIENT_VERSION = prop.getProperty(KYUUBI_CLIENT_VERSION_KEY, "unknown"); } catch (Exception e) { LOG.error("Error getting kyuubi client version", e); diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelper.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelper.java index 349fc8dfb6b..948fd333463 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelper.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelper.java @@ -17,27 +17,30 @@ package org.apache.kyuubi.jdbc.hive; +import com.google.common.annotations.VisibleForTesting; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.retry.ExponentialBackoffRetry; +import org.apache.kyuubi.shaded.curator.framework.CuratorFramework; +import org.apache.kyuubi.shaded.curator.framework.CuratorFrameworkFactory; +import org.apache.kyuubi.shaded.curator.retry.ExponentialBackoffRetry; class ZooKeeperHiveClientHelper { // Pattern for key1=value1;key2=value2 private static final Pattern kvPattern = Pattern.compile("([^=;]*)=([^;]*);?"); - private static String getZooKeeperNamespace(JdbcConnectionParams connParams) { + @VisibleForTesting + protected static String getZooKeeperNamespace(JdbcConnectionParams connParams) { String zooKeeperNamespace = connParams.getSessionVars().get(JdbcConnectionParams.ZOOKEEPER_NAMESPACE); if ((zooKeeperNamespace == null) || (zooKeeperNamespace.isEmpty())) { zooKeeperNamespace = 
JdbcConnectionParams.ZOOKEEPER_DEFAULT_NAMESPACE; } + zooKeeperNamespace = zooKeeperNamespace.replaceAll("^/+", "").replaceAll("/+$", ""); return zooKeeperNamespace; } @@ -108,7 +111,7 @@ static void configureConnParams(JdbcConnectionParams connParams) try (CuratorFramework zooKeeperClient = getZkClient(connParams)) { List serverHosts = getServerHosts(connParams, zooKeeperClient); // Now pick a server node randomly - String serverNode = serverHosts.get(new Random().nextInt(serverHosts.size())); + String serverNode = serverHosts.get(ThreadLocalRandom.current().nextInt(serverHosts.size())); updateParamsWithZKServerNode(connParams, zooKeeperClient, serverNode); } catch (Exception e) { throw new ZooKeeperHiveClientException( diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpKerberosRequestInterceptor.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpKerberosRequestInterceptor.java index 278cef0b4a7..02d168c3f5b 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpKerberosRequestInterceptor.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpKerberosRequestInterceptor.java @@ -65,7 +65,7 @@ protected void addHttpAuthHeader(HttpRequest httpRequest, HttpContext httpContex httpRequest.addHeader( HttpAuthUtils.AUTHORIZATION, HttpAuthUtils.NEGOTIATE + " " + kerberosAuthHeader); } catch (Exception e) { - throw new HttpException(e.getMessage(), e); + throw new HttpException(e.getMessage() == null ? "" : e.getMessage(), e); } finally { kerberosLock.unlock(); } diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpRequestInterceptorBase.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpRequestInterceptorBase.java index 9ce5a330b7c..42641c219c9 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpRequestInterceptorBase.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/auth/HttpRequestInterceptorBase.java @@ -110,7 +110,7 @@ public void process(HttpRequest httpRequest, HttpContext httpContext) httpRequest.addHeader("Cookie", cookieHeaderKeyValues.toString()); } } catch (Exception e) { - throw new HttpException(e.getMessage(), e); + throw new HttpException(e.getMessage() == null ? "" : e.getMessage(), e); } } diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/cli/ColumnBuffer.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/cli/ColumnBuffer.java index e703cb1f00c..bd5124f9524 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/cli/ColumnBuffer.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/cli/ColumnBuffer.java @@ -228,8 +228,9 @@ public Object get(int index) { return stringVars.get(index); case BINARY_TYPE: return binaryVars.get(index).array(); + default: + return null; } - return null; } @Override diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Date.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Date.java index 1b49c268a4b..720c7517f52 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Date.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Date.java @@ -65,6 +65,7 @@ public String toString() { return localDate.format(PRINT_FORMATTER); } + @Override public int hashCode() { return localDate.hashCode(); } @@ -164,6 +165,7 @@ public int getDayOfWeek() { } /** Return a copy of this object. 
*/ + @Override public Object clone() { // LocalDateTime is immutable. return new Date(this.localDate); diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/FastHiveDecimalImpl.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/FastHiveDecimalImpl.java index d3dba0f7b7a..65f17e73443 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/FastHiveDecimalImpl.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/FastHiveDecimalImpl.java @@ -5182,7 +5182,6 @@ public static boolean fastRoundIntegerDown( fastResult.fastIntegerDigitCount = 0; fastResult.fastScale = 0; } else { - fastResult.fastSignum = 0; fastResult.fastSignum = fastSignum; fastResult.fastIntegerDigitCount = fastRawPrecision(fastResult); fastResult.fastScale = 0; diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Timestamp.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Timestamp.java index cdb6b10ce52..7e02835b748 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Timestamp.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/Timestamp.java @@ -95,6 +95,7 @@ public String toString() { return localDateTime.format(PRINT_FORMATTER); } + @Override public int hashCode() { return localDateTime.hashCode(); } @@ -207,6 +208,7 @@ public int getDayOfWeek() { } /** Return a copy of this object. */ + @Override public Object clone() { // LocalDateTime is immutable. return new Timestamp(this.localDateTime); diff --git a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/TimestampTZUtil.java b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/TimestampTZUtil.java index a938e16889a..be16926cbe3 100644 --- a/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/TimestampTZUtil.java +++ b/kyuubi-hive-jdbc/src/main/java/org/apache/kyuubi/jdbc/hive/common/TimestampTZUtil.java @@ -98,7 +98,7 @@ private static String handleSingleDigitHourOffset(String s) { Matcher matcher = SINGLE_DIGIT_PATTERN.matcher(s); if (matcher.find()) { int index = matcher.start() + 1; - s = s.substring(0, index) + "0" + s.substring(index, s.length()); + s = s.substring(0, index) + "0" + s.substring(index); } return s; } diff --git a/kyuubi-hive-jdbc/src/main/resources/version.properties b/kyuubi-hive-jdbc/src/main/resources/org/apache/kyuubi/version.properties similarity index 100% rename from kyuubi-hive-jdbc/src/main/resources/version.properties rename to kyuubi-hive-jdbc/src/main/resources/org/apache/kyuubi/version.properties diff --git a/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/TestJdbcDriver.java b/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/TestJdbcDriver.java index 228ad00ee2d..efdf7309277 100644 --- a/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/TestJdbcDriver.java +++ b/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/TestJdbcDriver.java @@ -24,6 +24,7 @@ import java.io.File; import java.io.FileWriter; import java.io.IOException; +import java.nio.file.Files; import java.util.Arrays; import java.util.Collection; import org.junit.AfterClass; @@ -67,14 +68,14 @@ public static Collection data() { public static void setUpBeforeClass() throws Exception { file = new File(System.getProperty("user.dir") + File.separator + "Init.sql"); if (!file.exists()) { - file.createNewFile(); + Files.createFile(file.toPath()); } } @AfterClass public static void cleanUpAfterClass() throws 
Exception { if (file != null) { - file.delete(); + Files.deleteIfExists(file.toPath()); } } diff --git a/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelperTest.java b/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelperTest.java new file mode 100644 index 00000000000..d1fd78f473e --- /dev/null +++ b/kyuubi-hive-jdbc/src/test/java/org/apache/kyuubi/jdbc/hive/ZooKeeperHiveClientHelperTest.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.jdbc.hive; + +import static org.apache.kyuubi.jdbc.hive.Utils.extractURLComponents; +import static org.junit.Assert.assertEquals; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Properties; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@RunWith(Parameterized.class) +public class ZooKeeperHiveClientHelperTest { + + private String uri; + + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList( + new String[][] { + {"jdbc:hive2://hostname:10018/db;zooKeeperNamespace=zookeeper/namespace"}, + {"jdbc:hive2://hostname:10018/db;zooKeeperNamespace=/zookeeper/namespace"}, + {"jdbc:hive2://hostname:10018/db;zooKeeperNamespace=zookeeper/namespace/"}, + {"jdbc:hive2://hostname:10018/db;zooKeeperNamespace=/zookeeper/namespace/"}, + {"jdbc:hive2://hostname:10018/db;zooKeeperNamespace=///zookeeper/namespace///"} + }); + } + + public ZooKeeperHiveClientHelperTest(String uri) { + this.uri = uri; + } + + @Test + public void testGetZooKeeperNamespace() throws JdbcUriParseException { + JdbcConnectionParams jdbcConnectionParams = extractURLComponents(uri, new Properties()); + assertEquals( + "zookeeper/namespace", + ZooKeeperHiveClientHelper.getZooKeeperNamespace(jdbcConnectionParams)); + } +} diff --git a/kyuubi-metrics/pom.xml b/kyuubi-metrics/pom.xml index 2edeb73c7ce..5a95291bd59 100644 --- a/kyuubi-metrics/pom.xml +++ b/kyuubi-metrics/pom.xml @@ -21,10 +21,10 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT - kyuubi-metrics_2.12 + kyuubi-metrics_${scala.binary.version} jar Kyuubi Project Metrics https://kyuubi.apache.org/ diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/JsonReporterService.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/JsonReporterService.scala index cb0ef740431..7b172fc1eb9 100644 --- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/JsonReporterService.scala +++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/JsonReporterService.scala @@ -65,7 +65,7 @@ class JsonReporterService(registry: MetricRegistry) Files.setPosixFilePermissions(tmpPath, PosixFilePermissions.fromString("rwxr--r--")) 
Files.move(tmpPath, reportPath, StandardCopyOption.REPLACE_EXISTING) } catch { - case NonFatal(e) => error("Error writing metrics to json file" + reportPath, e) + case NonFatal(e) => error(s"Error writing metrics to json file: $reportPath", e) } finally { if (writer != null) writer.close() } diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConf.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConf.scala index ad734ced5d7..fe11f6eb18b 100644 --- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConf.scala +++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConf.scala @@ -32,7 +32,7 @@ object MetricsConf { .booleanConf .createWithDefault(true) - val METRICS_REPORTERS: ConfigEntry[Seq[String]] = buildConf("kyuubi.metrics.reporters") + val METRICS_REPORTERS: ConfigEntry[Set[String]] = buildConf("kyuubi.metrics.reporters") .doc("A comma-separated list for all metrics reporters" + "

      " + "
    • CONSOLE - ConsoleReporter which outputs measurements to CONSOLE periodically.
    • " + @@ -43,12 +43,10 @@ object MetricsConf { "
    ") .version("1.2.0") .stringConf - .transform(_.toUpperCase()) - .toSequence() - .checkValue( - _.forall(ReporterType.values.map(_.toString).contains), - s"the reporter type should be one or more of ${ReporterType.values.mkString(",")}") - .createWithDefault(Seq(JSON.toString)) + .transformToUpperCase + .toSet() + .checkValues(ReporterType) + .createWithDefault(Set(JSON.toString)) val METRICS_CONSOLE_INTERVAL: ConfigEntry[Long] = buildConf("kyuubi.metrics.console.interval") .doc("How often should report metrics to console") diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala index e97fd28ea25..f615467f3f0 100644 --- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala +++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala @@ -26,6 +26,7 @@ object MetricsConstants { final val BUFFER_POOL: String = KYUUBI + "buffer_pool" final val THREAD_STATE: String = KYUUBI + "thread_state" final val CLASS_LOADING: String = KYUUBI + "class_loading" + final val JVM: String = KYUUBI + "jvm" final val EXEC_POOL_ALIVE: String = KYUUBI + "exec.pool.threads.alive" final val EXEC_POOL_ACTIVE: String = KYUUBI + "exec.pool.threads.active" diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala index 99da1f1b06e..26344ca56a3 100644 --- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala +++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala @@ -67,6 +67,7 @@ class MetricsSystem extends CompositeService("MetricsSystem") { } override def initialize(conf: KyuubiConf): Unit = synchronized { + registry.registerAll(MetricsConstants.JVM, new JvmAttributeGaugeSet) registry.registerAll(MetricsConstants.GC_METRIC, new GarbageCollectorMetricSet) registry.registerAll(MetricsConstants.MEMORY_USAGE, new MemoryUsageGaugeSet) registry.registerAll( diff --git a/kyuubi-metrics/src/test/scala/org/apache/kyuubi/metrics/MetricsSystemSuite.scala b/kyuubi-metrics/src/test/scala/org/apache/kyuubi/metrics/MetricsSystemSuite.scala index 611531d73f9..bac20181ca5 100644 --- a/kyuubi-metrics/src/test/scala/org/apache/kyuubi/metrics/MetricsSystemSuite.scala +++ b/kyuubi-metrics/src/test/scala/org/apache/kyuubi/metrics/MetricsSystemSuite.scala @@ -49,7 +49,7 @@ class MetricsSystemSuite extends KyuubiFunSuite { val conf = KyuubiConf() .set(MetricsConf.METRICS_ENABLED, true) - .set(MetricsConf.METRICS_REPORTERS, Seq(ReporterType.PROMETHEUS.toString)) + .set(MetricsConf.METRICS_REPORTERS, Set(ReporterType.PROMETHEUS.toString)) .set(MetricsConf.METRICS_PROMETHEUS_PORT, 0) // random port .set(MetricsConf.METRICS_PROMETHEUS_PATH, testContextPath) val metricsSystem = new MetricsSystem() @@ -77,7 +77,7 @@ class MetricsSystemSuite extends KyuubiFunSuite { .set(MetricsConf.METRICS_ENABLED, true) .set( MetricsConf.METRICS_REPORTERS, - ReporterType.values.filterNot(_ == ReporterType.PROMETHEUS).map(_.toString).toSeq) + ReporterType.values.filterNot(_ == ReporterType.PROMETHEUS).map(_.toString)) .set(MetricsConf.METRICS_JSON_INTERVAL, Duration.ofSeconds(1).toMillis) .set(MetricsConf.METRICS_JSON_LOCATION, reportPath.toString) val metricsSystem = new MetricsSystem() diff --git a/kyuubi-rest-client/pom.xml b/kyuubi-rest-client/pom.xml index 6ba88e8dc8b..7d7e595c4f5 100644 --- a/kyuubi-rest-client/pom.xml +++ 
b/kyuubi-rest-client/pom.xml @@ -21,7 +21,7 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT kyuubi-rest-client @@ -77,11 +77,22 @@ true
    + + org.apache.kyuubi + kyuubi-util + ${project.version} + + org.slf4j slf4j-api + + org.slf4j + jcl-over-slf4j + + org.apache.logging.log4j log4j-slf4j-impl diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/AdminRestApi.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/AdminRestApi.java index b8bfe7ee15c..3b220cbc234 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/AdminRestApi.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/AdminRestApi.java @@ -22,6 +22,9 @@ import java.util.List; import java.util.Map; import org.apache.kyuubi.client.api.v1.dto.Engine; +import org.apache.kyuubi.client.api.v1.dto.OperationData; +import org.apache.kyuubi.client.api.v1.dto.ServerData; +import org.apache.kyuubi.client.api.v1.dto.SessionData; public class AdminRestApi { private KyuubiRestClient client; @@ -44,11 +47,21 @@ public String refreshUserDefaultsConf() { return this.getClient().post(path, null, client.getAuthHeader()); } + public String refreshKubernetesConf() { + String path = String.format("%s/%s", API_BASE_PATH, "refresh/kubernetes_conf"); + return this.getClient().post(path, null, client.getAuthHeader()); + } + public String refreshUnlimitedUsers() { String path = String.format("%s/%s", API_BASE_PATH, "refresh/unlimited_users"); return this.getClient().post(path, null, client.getAuthHeader()); } + public String refreshDenyUsers() { + String path = String.format("%s/%s", API_BASE_PATH, "refresh/deny_users"); + return this.getClient().post(path, null, client.getAuthHeader()); + } + public String deleteEngine( String engineType, String shareLevel, String subdomain, String hs2ProxyUser) { Map params = new HashMap<>(); @@ -60,18 +73,51 @@ public String deleteEngine( } public List listEngines( - String engineType, String shareLevel, String subdomain, String hs2ProxyUser) { + String engineType, String shareLevel, String subdomain, String hs2ProxyUser, String all) { Map params = new HashMap<>(); params.put("type", engineType); params.put("sharelevel", shareLevel); params.put("subdomain", subdomain); params.put("hive.server2.proxy.user", hs2ProxyUser); + params.put("all", all); Engine[] result = this.getClient() .get(API_BASE_PATH + "/engine", params, Engine[].class, client.getAuthHeader()); return Arrays.asList(result); } + public List listSessions() { + SessionData[] result = + this.getClient() + .get(API_BASE_PATH + "/sessions", null, SessionData[].class, client.getAuthHeader()); + return Arrays.asList(result); + } + + public String closeSession(String sessionHandleStr) { + String url = String.format("%s/sessions/%s", API_BASE_PATH, sessionHandleStr); + return this.getClient().delete(url, null, client.getAuthHeader()); + } + + public List listOperations() { + OperationData[] result = + this.getClient() + .get( + API_BASE_PATH + "/operations", null, OperationData[].class, client.getAuthHeader()); + return Arrays.asList(result); + } + + public String closeOperation(String operationHandleStr) { + String url = String.format("%s/operations/%s", API_BASE_PATH, operationHandleStr); + return this.getClient().delete(url, null, client.getAuthHeader()); + } + + public List listServers() { + ServerData[] result = + this.getClient() + .get(API_BASE_PATH + "/server", null, ServerData[].class, client.getAuthHeader()); + return Arrays.asList(result); + } + private IRestClient getClient() { return this.client.getHttpClient(); } diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/BatchRestApi.java 
b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/BatchRestApi.java index f5099568b21..7d113308df1 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/BatchRestApi.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/BatchRestApi.java @@ -63,10 +63,23 @@ public GetBatchesResponse listBatches( Long endTime, int from, int size) { + return listBatches(batchType, batchUser, batchState, null, createTime, endTime, from, size); + } + + public GetBatchesResponse listBatches( + String batchType, + String batchUser, + String batchState, + String batchName, + Long createTime, + Long endTime, + int from, + int size) { Map params = new HashMap<>(); params.put("batchType", batchType); params.put("batchUser", batchUser); params.put("batchState", batchState); + params.put("batchName", batchName); if (null != createTime && createTime > 0) { params.put("createTime", createTime); } @@ -102,8 +115,7 @@ private IRestClient getClient() { private void setClientVersion(BatchRequest request) { if (request != null) { - Map newConf = new HashMap<>(); - newConf.putAll(request.getConf()); + Map newConf = new HashMap<>(request.getConf()); newConf.put(VersionUtils.KYUUBI_CLIENT_VERSION_KEY, VersionUtils.getVersion()); request.setConf(newConf); } diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/IRestClient.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/IRestClient.java index 66897df54e2..0eaffebd246 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/IRestClient.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/IRestClient.java @@ -32,6 +32,10 @@ public interface IRestClient extends AutoCloseable { String post(String path, String body, String authHeader); + T put(String path, String body, Class type, String authHeader); + + String put(String path, String body, String authHeader); + T delete(String path, Map params, Class type, String authHeader); String delete(String path, Map params, String authHeader); diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/KyuubiRestClient.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/KyuubiRestClient.java index dbcc89b16d3..c83eff7e0a3 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/KyuubiRestClient.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/KyuubiRestClient.java @@ -30,6 +30,8 @@ public class KyuubiRestClient implements AutoCloseable, Cloneable { private RestClientConf conf; + private List hostUrls; + private List baseUrls; private ApiVersion version; @@ -77,14 +79,20 @@ public void setHostUrls(List hostUrls) { if (hostUrls.isEmpty()) { throw new IllegalArgumentException("hostUrls cannot be blank."); } + this.hostUrls = hostUrls; List baseUrls = initBaseUrls(hostUrls, version); this.httpClient = RetryableRestClient.getRestClient(baseUrls, this.conf); } + public List getHostUrls() { + return hostUrls; + } + private KyuubiRestClient() {} private KyuubiRestClient(Builder builder) { this.version = builder.version; + this.hostUrls = builder.hostUrls; this.baseUrls = initBaseUrls(builder.hostUrls, builder.version); RestClientConf conf = new RestClientConf(); diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/OperationRestApi.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/OperationRestApi.java new file mode 100644 index 00000000000..ad659a5d463 --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/OperationRestApi.java @@ -0,0 
+1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.client; + +import java.util.HashMap; +import java.util.Map; +import org.apache.kyuubi.client.api.v1.dto.*; +import org.apache.kyuubi.client.util.JsonUtils; + +public class OperationRestApi { + + private KyuubiRestClient client; + + private static final String API_BASE_PATH = "operations"; + + private OperationRestApi() {} + + public OperationRestApi(KyuubiRestClient client) { + this.client = client; + } + + public KyuubiOperationEvent getOperationEvent(String operationHandleStr) { + String path = String.format("%s/%s/event", API_BASE_PATH, operationHandleStr); + return this.getClient() + .get(path, new HashMap<>(), KyuubiOperationEvent.class, client.getAuthHeader()); + } + + public String applyOperationAction(OpActionRequest request, String operationHandleStr) { + String path = String.format("%s/%s", API_BASE_PATH, operationHandleStr); + return this.getClient().put(path, JsonUtils.toJson(request), client.getAuthHeader()); + } + + public ResultSetMetaData getResultSetMetadata(String operationHandleStr) { + String path = String.format("%s/%s/resultsetmetadata", API_BASE_PATH, operationHandleStr); + return this.getClient() + .get(path, new HashMap<>(), ResultSetMetaData.class, client.getAuthHeader()); + } + + public OperationLog getOperationLog(String operationHandleStr, int maxRows) { + String path = String.format("%s/%s/log", API_BASE_PATH, operationHandleStr); + Map params = new HashMap<>(); + params.put("maxrows", maxRows); + return this.getClient().get(path, params, OperationLog.class, client.getAuthHeader()); + } + + public ResultRowSet getNextRowSet(String operationHandleStr) { + return getNextRowSet(operationHandleStr, null, null); + } + + public ResultRowSet getNextRowSet( + String operationHandleStr, String fetchOrientation, Integer maxRows) { + String path = String.format("%s/%s/rowset", API_BASE_PATH, operationHandleStr); + Map params = new HashMap<>(); + if (fetchOrientation != null) params.put("fetchorientation", fetchOrientation); + if (maxRows != null) params.put("maxrows", maxRows); + return this.getClient().get(path, params, ResultRowSet.class, client.getAuthHeader()); + } + + private IRestClient getClient() { + return this.client.getHttpClient(); + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RestClient.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RestClient.java index 20e57b96376..e6d1d967420 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RestClient.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RestClient.java @@ -114,7 +114,7 @@ public T post( contentBody = new FileBody((File) payload); break; default: - throw new 
RuntimeException("Unsupported multi part type:" + multiPart); + throw new RuntimeException("Unsupported multi part type:" + multiPart.getType()); } entityBuilder.addPart(s, contentBody); }); @@ -126,6 +126,21 @@ public T post( return JsonUtils.fromJson(responseJson, type); } + @Override + public T put(String path, String body, Class type, String authHeader) { + String responseJson = put(path, body, authHeader); + return JsonUtils.fromJson(responseJson, type); + } + + @Override + public String put(String path, String body, String authHeader) { + RequestBuilder putRequestBuilder = RequestBuilder.put(); + if (body != null) { + putRequestBuilder.setEntity(new StringEntity(body, StandardCharsets.UTF_8)); + } + return doRequest(buildURI(path), authHeader, putRequestBuilder); + } + @Override public T delete(String path, Map params, Class type, String authHeader) { String responseJson = delete(path, params, authHeader); diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RetryableRestClient.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RetryableRestClient.java index dcd052acae4..d13151c2e4c 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RetryableRestClient.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/RetryableRestClient.java @@ -22,7 +22,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.List; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.kyuubi.client.exception.RetryableKyuubiRestException; import org.slf4j.Logger; @@ -44,7 +44,7 @@ public class RetryableRestClient implements InvocationHandler { private RetryableRestClient(List uris, RestClientConf conf) { this.conf = conf; this.uris = uris; - this.currentUriIndex = new Random(System.currentTimeMillis()).nextInt(uris.size()); + this.currentUriIndex = ThreadLocalRandom.current().nextInt(uris.size()); newRestClient(); } diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/SessionRestApi.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/SessionRestApi.java index fbb424102db..a4c3bb7ab24 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/SessionRestApi.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/SessionRestApi.java @@ -20,7 +20,8 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; -import org.apache.kyuubi.client.api.v1.dto.SessionData; +import org.apache.kyuubi.client.api.v1.dto.*; +import org.apache.kyuubi.client.util.JsonUtils; public class SessionRestApi { @@ -41,6 +42,102 @@ public List listSessions() { return Arrays.asList(result); } + public SessionHandle openSession(SessionOpenRequest sessionOpenRequest) { + return this.getClient() + .post( + API_BASE_PATH, + JsonUtils.toJson(sessionOpenRequest), + SessionHandle.class, + client.getAuthHeader()); + } + + public String closeSession(String sessionHandleStr) { + String path = String.format("%s/%s", API_BASE_PATH, sessionHandleStr); + return this.getClient().delete(path, new HashMap<>(), client.getAuthHeader()); + } + + public KyuubiSessionEvent getSessionEvent(String sessionHandleStr) { + String path = String.format("%s/%s", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .get(path, new HashMap<>(), KyuubiSessionEvent.class, client.getAuthHeader()); + } + + public InfoDetail getSessionInfo(String sessionHandleStr, int infoType) { + String path = 
String.format("%s/%s/info/%s", API_BASE_PATH, sessionHandleStr, infoType); + return this.getClient().get(path, new HashMap<>(), InfoDetail.class, client.getAuthHeader()); + } + + public int getOpenSessionCount() { + String path = String.format("%s/count", API_BASE_PATH); + return this.getClient() + .get(path, new HashMap<>(), SessionOpenCount.class, client.getAuthHeader()) + .getOpenSessionCount(); + } + + public ExecPoolStatistic getExecPoolStatistic() { + String path = String.format("%s/execPool/statistic", API_BASE_PATH); + return this.getClient() + .get(path, new HashMap<>(), ExecPoolStatistic.class, client.getAuthHeader()); + } + + public OperationHandle executeStatement(String sessionHandleStr, StatementRequest request) { + String path = String.format("%s/%s/operations/statement", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getTypeInfo(String sessionHandleStr) { + String path = String.format("%s/%s/operations/typeInfo", API_BASE_PATH, sessionHandleStr); + return this.getClient().post(path, "", OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getCatalogs(String sessionHandleStr) { + String path = String.format("%s/%s/operations/catalogs", API_BASE_PATH, sessionHandleStr); + return this.getClient().post(path, "", OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getSchemas(String sessionHandleStr, GetSchemasRequest request) { + String path = String.format("%s/%s/operations/schemas", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getTables(String sessionHandleStr, GetTablesRequest request) { + String path = String.format("%s/%s/operations/tables", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getTableTypes(String sessionHandleStr) { + String path = String.format("%s/%s/operations/tableTypes", API_BASE_PATH, sessionHandleStr); + return this.getClient().post(path, "", OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getColumns(String sessionHandleStr, GetColumnsRequest request) { + String path = String.format("%s/%s/operations/columns", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getFunctions(String sessionHandleStr, GetFunctionsRequest request) { + String path = String.format("%s/%s/operations/functions", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getPrimaryKeys(String sessionHandleStr, GetPrimaryKeysRequest request) { + String path = String.format("%s/%s/operations/primaryKeys", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, client.getAuthHeader()); + } + + public OperationHandle getCrossReference( + String sessionHandleStr, GetCrossReferenceRequest request) { + String path = String.format("%s/%s/operations/crossReference", API_BASE_PATH, sessionHandleStr); + return this.getClient() + .post(path, JsonUtils.toJson(request), OperationHandle.class, 
client.getAuthHeader()); + } + private IRestClient getClient() { return this.client.getHttpClient(); } diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/Count.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/Count.java new file mode 100644 index 00000000000..8f77ccd138d --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/Count.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Objects; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +public class Count { + private Integer count; + + public Count() {} + + public Count(Integer count) { + this.count = count; + } + + public Integer getCount() { + return count; + } + + public void setCount(Integer count) { + this.count = count; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Count that = (Count) o; + return Objects.equals(getCount(), that.getCount()); + } + + @Override + public int hashCode() { + return Objects.hash(getCount()); + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiOperationEvent.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiOperationEvent.java new file mode 100644 index 00000000000..13c40eecf78 --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiOperationEvent.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Map; + +public class KyuubiOperationEvent { + + private String statementId; + + private String remoteId; + + private String statement; + + private boolean shouldRunAsync; + + private String state; + + private long eventTime; + + private long createTime; + + private long startTime; + + private long completeTime; + + private Throwable exception; + + private String sessionId; + + private String sessionUser; + + private String sessionType; + + private String kyuubiInstance; + + private Map metrics; + + public KyuubiOperationEvent() {} + + public KyuubiOperationEvent( + String statementId, + String remoteId, + String statement, + boolean shouldRunAsync, + String state, + long eventTime, + long createTime, + long startTime, + long completeTime, + Throwable exception, + String sessionId, + String sessionUser, + String sessionType, + String kyuubiInstance, + Map metrics) { + this.statementId = statementId; + this.remoteId = remoteId; + this.statement = statement; + this.shouldRunAsync = shouldRunAsync; + this.state = state; + this.eventTime = eventTime; + this.createTime = createTime; + this.startTime = startTime; + this.completeTime = completeTime; + this.exception = exception; + this.sessionId = sessionId; + this.sessionUser = sessionUser; + this.sessionType = sessionType; + this.kyuubiInstance = kyuubiInstance; + this.metrics = metrics; + } + + public static KyuubiOperationEvent.KyuubiOperationEventBuilder builder() { + return new KyuubiOperationEvent.KyuubiOperationEventBuilder(); + } + + public static class KyuubiOperationEventBuilder { + private String statementId; + + private String remoteId; + + private String statement; + + private boolean shouldRunAsync; + + private String state; + + private long eventTime; + + private long createTime; + + private long startTime; + + private long completeTime; + + private Throwable exception; + + private String sessionId; + + private String sessionUser; + + private String sessionType; + + private String kyuubiInstance; + + private Map metrics; + + public KyuubiOperationEventBuilder() {} + + public KyuubiOperationEvent.KyuubiOperationEventBuilder statementId(final String statementId) { + this.statementId = statementId; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder remoteId(final String remoteId) { + this.remoteId = remoteId; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder statement(final String statement) { + this.statement = statement; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder shouldRunAsync( + final boolean shouldRunAsync) { + this.shouldRunAsync = shouldRunAsync; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder state(final String state) { + this.state = state; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder eventTime(final long eventTime) { + this.eventTime = eventTime; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder createTime(final long createTime) { + this.createTime = createTime; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder startTime(final long startTime) { + this.startTime = startTime; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder completeTime(final long completeTime) { + this.completeTime = completeTime; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder exception(final Throwable exception) { 
+ this.exception = exception; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder sessionId(final String sessionId) { + this.sessionId = sessionId; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder sessionUser(final String sessionUser) { + this.sessionUser = sessionUser; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder sessionType(final String sessionType) { + this.sessionType = sessionType; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder kyuubiInstance( + final String kyuubiInstance) { + this.kyuubiInstance = kyuubiInstance; + return this; + } + + public KyuubiOperationEvent.KyuubiOperationEventBuilder metrics( + final Map metrics) { + this.metrics = metrics; + return this; + } + + public KyuubiOperationEvent build() { + return new KyuubiOperationEvent( + statementId, + remoteId, + statement, + shouldRunAsync, + state, + eventTime, + createTime, + startTime, + completeTime, + exception, + sessionId, + sessionUser, + sessionType, + kyuubiInstance, + metrics); + } + } + + public String getStatementId() { + return statementId; + } + + public void setStatementId(String statementId) { + this.statementId = statementId; + } + + public String getRemoteId() { + return remoteId; + } + + public void setRemoteId(String remoteId) { + this.remoteId = remoteId; + } + + public String getStatement() { + return statement; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public boolean isShouldRunAsync() { + return shouldRunAsync; + } + + public void setShouldRunAsync(boolean shouldRunAsync) { + this.shouldRunAsync = shouldRunAsync; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public long getEventTime() { + return eventTime; + } + + public void setEventTime(long eventTime) { + this.eventTime = eventTime; + } + + public long getCreateTime() { + return createTime; + } + + public void setCreateTime(long createTime) { + this.createTime = createTime; + } + + public long getStartTime() { + return startTime; + } + + public void setStartTime(long startTime) { + this.startTime = startTime; + } + + public long getCompleteTime() { + return completeTime; + } + + public void setCompleteTime(long completeTime) { + this.completeTime = completeTime; + } + + public Throwable getException() { + return exception; + } + + public void setException(Throwable exception) { + this.exception = exception; + } + + public String getSessionId() { + return sessionId; + } + + public void setSessionId(String sessionId) { + this.sessionId = sessionId; + } + + public String getSessionUser() { + return sessionUser; + } + + public void setSessionUser(String sessionUser) { + this.sessionUser = sessionUser; + } + + public String getSessionType() { + return sessionType; + } + + public void setSessionType(String sessionType) { + this.sessionType = sessionType; + } + + public String getKyuubiInstance() { + return kyuubiInstance; + } + + public void setKyuubiInstance(String kyuubiInstance) { + this.kyuubiInstance = kyuubiInstance; + } + + public Map getMetrics() { + return metrics; + } + + public void setMetrics(Map metrics) { + this.metrics = metrics; + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiSessionEvent.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiSessionEvent.java new file mode 100644 index 00000000000..34d306fedb9 --- 
/dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/KyuubiSessionEvent.java @@ -0,0 +1,361 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Map; + +public class KyuubiSessionEvent { + + private String sessionId; + + private int clientVersion; + + private String sessionType; + + private String sessionName; + + private String remoteSessionId; + + private String engineId; + + private String user; + + private String clientIp; + + private String serverIp; + + private Map conf; + + private long eventTime; + + private long openedTime; + + private long startTime; + + private long endTime; + + private int totalOperations; + + private Throwable exception; + + public KyuubiSessionEvent() {} + + public KyuubiSessionEvent( + String sessionId, + int clientVersion, + String sessionType, + String sessionName, + String remoteSessionId, + String engineId, + String user, + String clientIp, + String serverIp, + Map conf, + long eventTime, + long openedTime, + long startTime, + long endTime, + int totalOperations, + Throwable exception) { + this.sessionId = sessionId; + this.clientVersion = clientVersion; + this.sessionType = sessionType; + this.sessionName = sessionName; + this.remoteSessionId = remoteSessionId; + this.engineId = engineId; + this.user = user; + this.clientIp = clientIp; + this.serverIp = serverIp; + this.conf = conf; + this.eventTime = eventTime; + this.openedTime = openedTime; + this.startTime = startTime; + this.endTime = endTime; + this.totalOperations = totalOperations; + this.exception = exception; + } + + public static KyuubiSessionEvent.KyuubiSessionEventBuilder builder() { + return new KyuubiSessionEvent.KyuubiSessionEventBuilder(); + } + + public static class KyuubiSessionEventBuilder { + private String sessionId; + + private int clientVersion; + + private String sessionType; + + private String sessionName; + + private String remoteSessionId; + + private String engineId; + + private String user; + + private String clientIp; + + private String serverIp; + + private Map conf; + + private long eventTime; + + private long openedTime; + + private long startTime; + + private long endTime; + + private int totalOperations; + + private Throwable exception; + + public KyuubiSessionEventBuilder() {} + + public KyuubiSessionEvent.KyuubiSessionEventBuilder sessionId(final String sessionId) { + this.sessionId = sessionId; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder remoteSessionId( + final String remoteSessionId) { + this.remoteSessionId = remoteSessionId; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder clientVersion(final int clientVersion) { + this.clientVersion = clientVersion; + return 
this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder sessionType(final String sessionType) { + this.sessionType = sessionType; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder sessionName(final String sessionName) { + this.sessionName = sessionName; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder engineId(final String engineId) { + this.engineId = engineId; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder user(final String user) { + this.user = user; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder clientIp(final String clientIp) { + this.clientIp = clientIp; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder serverIp(final String serverIp) { + this.serverIp = serverIp; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder conf(final Map conf) { + this.conf = conf; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder eventTime(final long eventTime) { + this.eventTime = eventTime; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder openedTime(final long openedTime) { + this.openedTime = openedTime; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder startTime(final long startTime) { + this.startTime = startTime; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder endTime(final long endTime) { + this.endTime = endTime; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder totalOperations(final int totalOperations) { + this.totalOperations = totalOperations; + return this; + } + + public KyuubiSessionEvent.KyuubiSessionEventBuilder exception(final Throwable exception) { + this.exception = exception; + return this; + } + + public KyuubiSessionEvent build() { + return new KyuubiSessionEvent( + sessionId, + clientVersion, + sessionType, + sessionName, + remoteSessionId, + engineId, + user, + clientIp, + serverIp, + conf, + eventTime, + openedTime, + startTime, + endTime, + totalOperations, + exception); + } + } + + public String getSessionId() { + return sessionId; + } + + public void setSessionId(String sessionId) { + this.sessionId = sessionId; + } + + public int getClientVersion() { + return clientVersion; + } + + public void setClientVersion(int clientVersion) { + this.clientVersion = clientVersion; + } + + public String getSessionType() { + return sessionType; + } + + public void setSessionType(String sessionType) { + this.sessionType = sessionType; + } + + public String getSessionName() { + return sessionName; + } + + public void setSessionName(String sessionName) { + this.sessionName = sessionName; + } + + public String getRemoteSessionId() { + return remoteSessionId; + } + + public void setRemoteSessionId(String remoteSessionId) { + this.remoteSessionId = remoteSessionId; + } + + public String getEngineId() { + return engineId; + } + + public void setEngineId(String engineId) { + this.engineId = engineId; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getClientIp() { + return clientIp; + } + + public void setClientIp(String clientIp) { + this.clientIp = clientIp; + } + + public String getServerIp() { + return serverIp; + } + + public void setServerIp(String serverIp) { + this.serverIp = serverIp; + } + + public Map getConf() { + return conf; + } + + public void setConf(Map conf) { + this.conf = conf; + } + + public long 
getEventTime() { + return eventTime; + } + + public void setEventTime(long eventTime) { + this.eventTime = eventTime; + } + + public long getOpenedTime() { + return openedTime; + } + + public void setOpenedTime(long openedTime) { + this.openedTime = openedTime; + } + + public long getStartTime() { + return startTime; + } + + public void setStartTime(long startTime) { + this.startTime = startTime; + } + + public long getEndTime() { + return endTime; + } + + public void setEndTime(long endTime) { + this.endTime = endTime; + } + + public int getTotalOperations() { + return totalOperations; + } + + public void setTotalOperations(int totalOperations) { + this.totalOperations = totalOperations; + } + + public Throwable getException() { + return exception; + } + + public void setException(Throwable exception) { + this.exception = exception; + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationData.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationData.java new file mode 100644 index 00000000000..70c2dd3f3a1 --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationData.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +public class OperationData { + private String identifier; + private String statement; + private String state; + private Long createTime; + private Long startTime; + private Long completeTime; + private String exception; + private String sessionId; + private String sessionUser; + private String sessionType; + private String kyuubiInstance; + private Map metrics; + + public OperationData() {} + + public OperationData( + String identifier, + String statement, + String state, + Long createTime, + Long startTime, + Long completeTime, + String exception, + String sessionId, + String sessionUser, + String sessionType, + String kyuubiInstance, + Map metrics) { + this.identifier = identifier; + this.statement = statement; + this.state = state; + this.createTime = createTime; + this.startTime = startTime; + this.completeTime = completeTime; + this.exception = exception; + this.sessionId = sessionId; + this.sessionUser = sessionUser; + this.sessionType = sessionType; + this.kyuubiInstance = kyuubiInstance; + this.metrics = metrics; + } + + public String getIdentifier() { + return identifier; + } + + public void setIdentifier(String identifier) { + this.identifier = identifier; + } + + public String getStatement() { + return statement; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public Long getCreateTime() { + return createTime; + } + + public void setCreateTime(Long createTime) { + this.createTime = createTime; + } + + public Long getStartTime() { + return startTime; + } + + public void setStartTime(Long startTime) { + this.startTime = startTime; + } + + public Long getCompleteTime() { + return completeTime; + } + + public void setCompleteTime(Long completeTime) { + this.completeTime = completeTime; + } + + public String getException() { + return exception; + } + + public void setException(String exception) { + this.exception = exception; + } + + public String getSessionId() { + return sessionId; + } + + public void setSessionId(String sessionId) { + this.sessionId = sessionId; + } + + public String getSessionUser() { + return sessionUser; + } + + public void setSessionUser(String sessionUser) { + this.sessionUser = sessionUser; + } + + public String getSessionType() { + return sessionType; + } + + public void setSessionType(String sessionType) { + this.sessionType = sessionType; + } + + public String getKyuubiInstance() { + return kyuubiInstance; + } + + public void setKyuubiInstance(String kyuubiInstance) { + this.kyuubiInstance = kyuubiInstance; + } + + public Map getMetrics() { + if (null == metrics) { + return Collections.emptyMap(); + } + return metrics; + } + + public void setMetrics(Map metrics) { + this.metrics = metrics; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OperationData that = (OperationData) o; + return Objects.equals(getIdentifier(), that.getIdentifier()); + } + + @Override + public int hashCode() { + return Objects.hash(getIdentifier()); + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); 
+ } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationHandle.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationHandle.java new file mode 100644 index 00000000000..394e6c157c7 --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/OperationHandle.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Objects; +import java.util.UUID; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +public class OperationHandle { + + private UUID identifier; + + public OperationHandle() {} + + public OperationHandle(UUID identifier) { + this.identifier = identifier; + } + + public UUID getIdentifier() { + return identifier; + } + + public void setIdentifier(UUID identifier) { + this.identifier = identifier; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OperationHandle that = (OperationHandle) o; + return Objects.equals(identifier, that.identifier); + } + + @Override + public int hashCode() { + return Objects.hash(identifier); + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java new file mode 100644 index 00000000000..7b68763d28b --- /dev/null +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.client.api.v1.dto; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class ServerData { + private String nodeName; + private String namespace; + private String instance; + private String host; + private int port; + private Map attributes; + private String status; + + public ServerData() {} + + public ServerData( + String nodeName, + String namespace, + String instance, + String host, + int port, + Map attributes, + String status) { + this.nodeName = nodeName; + this.namespace = namespace; + this.instance = instance; + this.host = host; + this.port = port; + this.attributes = attributes; + this.status = status; + } + + public String getNodeName() { + return nodeName; + } + + public ServerData setNodeName(String nodeName) { + this.nodeName = nodeName; + return this; + } + + public String getNamespace() { + return namespace; + } + + public ServerData setNamespace(String namespace) { + this.namespace = namespace; + return this; + } + + public String getInstance() { + return instance; + } + + public ServerData setInstance(String instance) { + this.instance = instance; + return this; + } + + public String getHost() { + return host; + } + + public ServerData setHost(String host) { + this.host = host; + return this; + } + + public int getPort() { + return port; + } + + public ServerData setPort(int port) { + this.port = port; + return this; + } + + public Map getAttributes() { + if (null == attributes) { + return Collections.emptyMap(); + } + return attributes; + } + + public ServerData setAttributes(Map attributes) { + this.attributes = attributes; + return this; + } + + public String getStatus() { + return status; + } + + public ServerData setStatus(String status) { + this.status = status; + return this; + } + + @Override + public int hashCode() { + return Objects.hash(nodeName, namespace, instance, port, attributes, status); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + ServerData server = (ServerData) obj; + return port == server.port + && Objects.equals(nodeName, server.nodeName) + && Objects.equals(namespace, server.namespace) + && Objects.equals(instance, server.instance) + && Objects.equals(host, server.host) + && Objects.equals(status, server.status); + } +} diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionData.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionData.java index bae6f39dabd..ae7dfdec984 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionData.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionData.java @@ -31,6 +31,10 @@ public class SessionData { private Long createTime; private Long duration; private Long idleTime; + private String exception; + private String sessionType; + private String kyuubiInstance; + private String engineId; public SessionData() {} @@ -41,7 +45,11 @@ public SessionData( Map conf, Long createTime, Long duration, - Long idleTime) { + Long idleTime, + String exception, + String sessionType, + String kyuubiInstance, + String engineId) { this.identifier = identifier; this.user = user; this.ipAddr = ipAddr; @@ -49,6 +57,10 @@ public SessionData( this.createTime = createTime; this.duration = duration; this.idleTime = idleTime; + this.exception = exception; + this.sessionType = sessionType; + this.kyuubiInstance = kyuubiInstance; + 
this.engineId = engineId; } public String getIdentifier() { @@ -110,6 +122,38 @@ public void setIdleTime(Long idleTime) { this.idleTime = idleTime; } + public String getException() { + return exception; + } + + public void setException(String exception) { + this.exception = exception; + } + + public String getSessionType() { + return sessionType; + } + + public void setSessionType(String sessionType) { + this.sessionType = sessionType; + } + + public String getKyuubiInstance() { + return kyuubiInstance; + } + + public void setKyuubiInstance(String kyuubiInstance) { + this.kyuubiInstance = kyuubiInstance; + } + + public String getEngineId() { + return engineId; + } + + public void setEngineId(String engineId) { + this.engineId = engineId; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionOpenRequest.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionOpenRequest.java index 2d23aac5717..06eb29e9723 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionOpenRequest.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/SessionOpenRequest.java @@ -24,24 +24,14 @@ import org.apache.commons.lang3.builder.ToStringStyle; public class SessionOpenRequest { - private int protocolVersion; private Map configs; public SessionOpenRequest() {} - public SessionOpenRequest(int protocolVersion, Map configs) { - this.protocolVersion = protocolVersion; + public SessionOpenRequest(Map configs) { this.configs = configs; } - public int getProtocolVersion() { - return protocolVersion; - } - - public void setProtocolVersion(int protocolVersion) { - this.protocolVersion = protocolVersion; - } - public Map getConfigs() { if (null == configs) { return Collections.emptyMap(); @@ -58,13 +48,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; SessionOpenRequest that = (SessionOpenRequest) o; - return getProtocolVersion() == that.getProtocolVersion() - && Objects.equals(getConfigs(), that.getConfigs()); + return Objects.equals(getConfigs(), that.getConfigs()); } @Override public int hashCode() { - return Objects.hash(getProtocolVersion(), getConfigs()); + return Objects.hash(getConfigs()); } @Override diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/StatementRequest.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/StatementRequest.java index 436017f3c1e..f2dc060d5ec 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/StatementRequest.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/StatementRequest.java @@ -17,6 +17,8 @@ package org.apache.kyuubi.client.api.v1.dto; +import java.util.Collections; +import java.util.Map; import java.util.Objects; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; @@ -25,13 +27,20 @@ public class StatementRequest { private String statement; private boolean runAsync; private Long queryTimeout; + private Map confOverlay; public StatementRequest() {} public StatementRequest(String statement, boolean runAsync, Long queryTimeout) { + this(statement, runAsync, queryTimeout, Collections.emptyMap()); + } + + public StatementRequest( + String statement, boolean runAsync, Long queryTimeout, Map confOverlay) { this.statement = statement; 
this.runAsync = runAsync; this.queryTimeout = queryTimeout; + this.confOverlay = confOverlay; } public String getStatement() { @@ -58,6 +67,17 @@ public void setQueryTimeout(Long queryTimeout) { this.queryTimeout = queryTimeout; } + public Map getConfOverlay() { + if (confOverlay == null) { + return Collections.emptyMap(); + } + return confOverlay; + } + + public void setConfOverlay(Map confOverlay) { + this.confOverlay = confOverlay; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/auth/SpnegoAuthHeaderGenerator.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/auth/SpnegoAuthHeaderGenerator.java index 435a850142f..c66c6465ed1 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/auth/SpnegoAuthHeaderGenerator.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/auth/SpnegoAuthHeaderGenerator.java @@ -17,13 +17,13 @@ package org.apache.kyuubi.client.auth; -import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.Base64; import javax.security.auth.Subject; import org.apache.kyuubi.client.exception.KyuubiRestException; +import org.apache.kyuubi.util.reflect.DynFields; +import org.apache.kyuubi.util.reflect.DynMethods; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSException; import org.ietf.jgss.GSSManager; @@ -61,13 +61,17 @@ public String generateAuthHeader() { private String generateToken(String server) throws Exception { Subject subject; try { - Class ugiClz = Class.forName(UGI_CLASS); - Method ugiGetCurrentUserMethod = ugiClz.getDeclaredMethod("getCurrentUser"); - Object ugiCurrentUser = ugiGetCurrentUserMethod.invoke(null); + Object ugiCurrentUser = + DynMethods.builder("getCurrentUser") + .hiddenImpl(Class.forName(UGI_CLASS)) + .buildStaticChecked() + .invoke(); LOG.debug("The user credential is {}", ugiCurrentUser); - Field ugiSubjectField = ugiCurrentUser.getClass().getDeclaredField("subject"); - ugiSubjectField.setAccessible(true); - subject = (Subject) ugiSubjectField.get(ugiCurrentUser); + subject = + DynFields.builder() + .hiddenImpl(ugiCurrentUser.getClass(), "subject") + .buildChecked(ugiCurrentUser) + .get(); } catch (ClassNotFoundException e) { // TODO do kerberos authentication using JDK class directly LOG.error("Hadoop UGI class {} is required for SPNEGO authentication.", UGI_CLASS); diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/util/VersionUtils.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/util/VersionUtils.java index bcabca5b9f8..1f8cedf4b0e 100644 --- a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/util/VersionUtils.java +++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/util/VersionUtils.java @@ -31,7 +31,10 @@ public static synchronized String getVersion() { if (KYUUBI_CLIENT_VERSION == null) { try { Properties prop = new Properties(); - prop.load(VersionUtils.class.getClassLoader().getResourceAsStream("version.properties")); + prop.load( + VersionUtils.class + .getClassLoader() + .getResourceAsStream("org/apache/kyuubi/version.properties")); KYUUBI_CLIENT_VERSION = prop.getProperty(KYUUBI_CLIENT_VERSION_KEY, "unknown"); } catch (Exception e) { LOG.error("Error getting kyuubi client version", e); diff --git a/kyuubi-rest-client/src/main/resources/version.properties 
b/kyuubi-rest-client/src/main/resources/org/apache/kyuubi/version.properties similarity index 100% rename from kyuubi-rest-client/src/main/resources/version.properties rename to kyuubi-rest-client/src/main/resources/org/apache/kyuubi/version.properties diff --git a/kyuubi-server/pom.xml b/kyuubi-server/pom.xml index aebce5ddaa8..a8b133d2792 100644 --- a/kyuubi-server/pom.xml +++ b/kyuubi-server/pom.xml @@ -21,10 +21,10 @@ org.apache.kyuubi kyuubi-parent - 1.8.0-SNAPSHOT + 1.9.0-SNAPSHOT - kyuubi-server_2.12 + kyuubi-server_${scala.binary.version} jar Kyuubi Project Server https://kyuubi.apache.org/ @@ -36,6 +36,12 @@ ${project.version} + + org.apache.kyuubi + kyuubi-hive-jdbc + ${project.version} + + org.apache.kyuubi kyuubi-events_${scala.binary.version} @@ -92,6 +98,11 @@ kubernetes-client + + io.fabric8 + kubernetes-httpclient-okhttp + + org.apache.hive hive-metastore @@ -241,11 +252,21 @@ derby + + org.xerial + sqlite-jdbc + + io.trino trino-client + + org.eclipse.jetty + jetty-proxy + + org.glassfish.jersey.test-framework jersey-test-framework-core @@ -384,6 +405,23 @@ swagger-ui + + org.apache.kafka + kafka-clients + + + + com.dimafeng + testcontainers-scala-scalatest_${scala.binary.version} + test + + + + com.dimafeng + testcontainers-scala-kafka_${scala.binary.version} + test + + org.apache.hive hive-exec @@ -416,42 +454,6 @@ test - - org.apache.spark - spark-avro_${scala.binary.version} - test - - - - org.apache.parquet - parquet-avro - test - - - - org.apache.hudi - hudi-common - test - - - - org.apache.hudi - hudi-spark-common_${scala.binary.version} - test - - - - org.apache.hudi - hudi-spark_${scala.binary.version} - test - - - - org.apache.hudi - hudi-spark3.1.x_${scala.binary.version} - test - - io.delta delta-core_${scala.binary.version} @@ -484,7 +486,7 @@ org.scalatestplus - mockito-4-6_${scala.binary.version} + mockito-4-11_${scala.binary.version} test diff --git a/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseLexer.g4 b/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseLexer.g4 index 0cc02de6435..83810a073c7 100644 --- a/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseLexer.g4 +++ b/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseLexer.g4 @@ -43,6 +43,10 @@ FALSE: 'FALSE'; LIKE: 'LIKE'; IN: 'IN'; WHERE: 'WHERE'; +EXECUTE: 'EXECUTE'; +PREPARE: 'PREPARE'; +DEALLOCATE: 'DEALLOCATE'; +USING: 'USING'; ESCAPE: 'ESCAPE'; AUTO_INCREMENT: 'AUTO_INCREMENT'; @@ -98,7 +102,16 @@ SOURCE_DATA_TYPE: 'SOURCE_DATA_TYPE'; IS_AUTOINCREMENT: 'IS_AUTOINCREMENT'; IS_GENERATEDCOLUMN: 'IS_GENERATEDCOLUMN'; VARCHAR: 'VARCHAR'; +TINYINT: 'TINYINT'; SMALLINT: 'SMALLINT'; +INTEGER: 'INTEGER'; +BIGINT: 'BIGINT'; +REAL: 'REAL'; +DOUBLE: 'DOUBLE'; +DECIMAL: 'DECIMAL'; +DATE: 'DATE'; +TIME: 'TIME'; +TIMESTAMP: 'TIMESTAMP'; CAST: 'CAST'; AS: 'AS'; KEY_SEQ: 'KEY_SEQ'; @@ -114,6 +127,10 @@ STRING : '\'' ( ~'\'' | '\'\'' )* '\'' ; +STRING_MARK + : '\'' + ; + SIMPLE_COMMENT : '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN) ; @@ -125,6 +142,10 @@ BRACKETED_COMMENT WS : [ \r\n\t]+ -> channel(HIDDEN) ; +IDENTIFIER + : [A-Za-z_$0-9\u0080-\uFFFF]*?[A-Za-z_$\u0080-\uFFFF]+?[A-Za-z_$0-9\u0080-\uFFFF]* + ; + // Catch-all for anything we can't recognize. 
// We use this to be able to ignore and recover all the text // when splitting statements with DelimiterLexer diff --git a/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseParser.g4 b/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseParser.g4 index 6af00af5de5..72811e59231 100644 --- a/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseParser.g4 +++ b/kyuubi-server/src/main/antlr4/org/apache/kyuubi/sql/KyuubiTrinoFeBaseParser.g4 @@ -54,9 +54,20 @@ statement CAST LEFT_PAREN NULL AS SMALLINT RIGHT_PAREN KEY_SEQ COMMA CAST LEFT_PAREN NULL AS VARCHAR RIGHT_PAREN PK_NAME WHERE FALSE #getPrimaryKeys + | EXECUTE IDENTIFIER (USING parameterList)? #execute + | PREPARE IDENTIFIER FROM statement #prepare + | DEALLOCATE PREPARE IDENTIFIER #deallocate | .*? #passThrough ; +anyStr + : ( ~',' )* + ; + +parameterList + : (TINYINT|SMALLINT|INTEGER|BIGINT|DOUBLE|REAL|DECIMAL|DATE|TIME|TIMESTAMP)? anyStr (',' (TINYINT|SMALLINT|INTEGER|BIGINT|DOUBLE|REAL|DECIMAL|DATE|TIME|TIMESTAMP)? anyStr)* + ; + tableCatalogFilter : (TABLE_CAT | TABLE_CATALOG) IS NULL #nullCatalog | (TABLE_CAT | TABLE_CATALOG) EQ catalog=STRING+ #catalogFilter diff --git a/kyuubi-server/src/main/resources/sql/derby/003-KYUUBI-5078.derby.sql b/kyuubi-server/src/main/resources/sql/derby/003-KYUUBI-5078.derby.sql new file mode 100644 index 00000000000..dfdfe6069d0 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/derby/003-KYUUBI-5078.derby.sql @@ -0,0 +1 @@ +ALTER TABLE metadata ALTER COLUMN kyuubi_instance DROP NOT NULL; diff --git a/kyuubi-server/src/main/resources/sql/derby/004-KYUUBI-5131.derby.sql b/kyuubi-server/src/main/resources/sql/derby/004-KYUUBI-5131.derby.sql new file mode 100644 index 00000000000..6a3142ffd3d --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/derby/004-KYUUBI-5131.derby.sql @@ -0,0 +1 @@ +CREATE INDEX metadata_create_time_index ON metadata(create_time); diff --git a/kyuubi-server/src/main/resources/sql/derby/metadata-store-schema-1.8.0.derby.sql b/kyuubi-server/src/main/resources/sql/derby/metadata-store-schema-1.8.0.derby.sql new file mode 100644 index 00000000000..8d333bda2bd --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/derby/metadata-store-schema-1.8.0.derby.sql @@ -0,0 +1,38 @@ +-- Derby does not support `CREATE TABLE IF NOT EXISTS` + +-- the metadata table ddl + +CREATE TABLE metadata( + key_id bigint PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, -- the auto increment key id + identifier varchar(36) NOT NULL, -- the identifier id, which is an UUID + session_type varchar(32) NOT NULL, -- the session type, SQL or BATCH + real_user varchar(255) NOT NULL, -- the real user + user_name varchar(255) NOT NULL, -- the user name, might be a proxy user + ip_address varchar(128), -- the client ip address + kyuubi_instance varchar(1024), -- the kyuubi instance that creates this + state varchar(128) NOT NULL, -- the session state + resource varchar(1024), -- the main resource + class_name varchar(1024), -- the main class name + request_name varchar(1024), -- the request name + request_conf clob, -- the request config map + request_args clob, -- the request arguments + create_time BIGINT NOT NULL, -- the metadata create time + engine_type varchar(32) NOT NULL, -- the engine type + cluster_manager varchar(128), -- the engine cluster manager + engine_open_time bigint, -- the engine open time + engine_id varchar(128), -- the engine application id + engine_name clob, -- the engine application name + engine_url varchar(1024), -- the engine 
tracking url + engine_state varchar(32), -- the engine application state + engine_error clob, -- the engine application diagnose + end_time bigint, -- the metadata end time + peer_instance_closed boolean default FALSE -- closed by peer kyuubi instance +); + +CREATE UNIQUE INDEX metadata_unique_identifier_index ON metadata(identifier); + +CREATE INDEX metadata_user_name_index ON metadata(user_name); + +CREATE INDEX metadata_engine_type_index ON metadata(engine_type); + +CREATE INDEX metadata_create_time_index ON metadata(create_time); diff --git a/kyuubi-server/src/main/resources/sql/derby/upgrade-1.7.0-to-1.8.0.derby.sql b/kyuubi-server/src/main/resources/sql/derby/upgrade-1.7.0-to-1.8.0.derby.sql new file mode 100644 index 00000000000..234510665f8 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/derby/upgrade-1.7.0-to-1.8.0.derby.sql @@ -0,0 +1,2 @@ +RUN '003-KYUUBI-5078.derby.sql'; +RUN '004-KYUUBI-5131.derby.sql'; diff --git a/kyuubi-server/src/main/resources/sql/mysql/003-KYUUBI-5078.mysql.sql b/kyuubi-server/src/main/resources/sql/mysql/003-KYUUBI-5078.mysql.sql new file mode 100644 index 00000000000..1d730cd4cf2 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/mysql/003-KYUUBI-5078.mysql.sql @@ -0,0 +1,3 @@ +SELECT '< KYUUBI-5078: Make kyuubi_instance nullable in metadata table schema' AS ' '; + +ALTER TABLE metadata MODIFY kyuubi_instance varchar(1024) COMMENT 'the kyuubi instance that creates this'; diff --git a/kyuubi-server/src/main/resources/sql/mysql/004-KYUUBI-5131.mysql.sql b/kyuubi-server/src/main/resources/sql/mysql/004-KYUUBI-5131.mysql.sql new file mode 100644 index 00000000000..e743fc3d73e --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/mysql/004-KYUUBI-5131.mysql.sql @@ -0,0 +1,3 @@ +SELECT '< KYUUBI-5131: Create index on metastore.create_time' AS ' '; + +ALTER TABLE metadata ADD INDEX create_time_index(create_time); diff --git a/kyuubi-server/src/main/resources/sql/mysql/metadata-store-schema-1.8.0.mysql.sql b/kyuubi-server/src/main/resources/sql/mysql/metadata-store-schema-1.8.0.mysql.sql new file mode 100644 index 00000000000..77df8fa0562 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/mysql/metadata-store-schema-1.8.0.mysql.sql @@ -0,0 +1,32 @@ +-- the metadata table ddl + +CREATE TABLE IF NOT EXISTS metadata( + key_id bigint PRIMARY KEY AUTO_INCREMENT COMMENT 'the auto increment key id', + identifier varchar(36) NOT NULL COMMENT 'the identifier id, which is an UUID', + session_type varchar(32) NOT NULL COMMENT 'the session type, SQL or BATCH', + real_user varchar(255) NOT NULL COMMENT 'the real user', + user_name varchar(255) NOT NULL COMMENT 'the user name, might be a proxy user', + ip_address varchar(128) COMMENT 'the client ip address', + kyuubi_instance varchar(1024) COMMENT 'the kyuubi instance that creates this', + state varchar(128) NOT NULL COMMENT 'the session state', + resource varchar(1024) COMMENT 'the main resource', + class_name varchar(1024) COMMENT 'the main class name', + request_name varchar(1024) COMMENT 'the request name', + request_conf mediumtext COMMENT 'the request config map', + request_args mediumtext COMMENT 'the request arguments', + create_time BIGINT NOT NULL COMMENT 'the metadata create time', + engine_type varchar(32) NOT NULL COMMENT 'the engine type', + cluster_manager varchar(128) COMMENT 'the engine cluster manager', + engine_open_time bigint COMMENT 'the engine open time', + engine_id varchar(128) COMMENT 'the engine application id', + engine_name mediumtext COMMENT 'the engine application 
name', + engine_url varchar(1024) COMMENT 'the engine tracking url', + engine_state varchar(32) COMMENT 'the engine application state', + engine_error mediumtext COMMENT 'the engine application diagnose', + end_time bigint COMMENT 'the metadata end time', + peer_instance_closed boolean default '0' COMMENT 'closed by peer kyuubi instance', + UNIQUE INDEX unique_identifier_index(identifier), + INDEX user_name_index(user_name), + INDEX engine_type_index(engine_type), + INDEX create_time_index(create_time) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; diff --git a/kyuubi-server/src/main/resources/sql/mysql/upgrade-1.7.0-to-1.8.0.mysql.sql b/kyuubi-server/src/main/resources/sql/mysql/upgrade-1.7.0-to-1.8.0.mysql.sql new file mode 100644 index 00000000000..473997448ba --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/mysql/upgrade-1.7.0-to-1.8.0.mysql.sql @@ -0,0 +1,4 @@ +SELECT '< Upgrading MetaStore schema from 1.7.0 to 1.8.0 >' AS ' '; +SOURCE 003-KYUUBI-5078.mysql.sql; +SOURCE 004-KYUUBI-5131.mysql.sql; +SELECT '< Finished upgrading MetaStore schema from 1.7.0 to 1.8.0 >' AS ' '; diff --git a/kyuubi-server/src/main/resources/sql/sqlite/README b/kyuubi-server/src/main/resources/sql/sqlite/README new file mode 100644 index 00000000000..de15931f552 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/sqlite/README @@ -0,0 +1,82 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Kyuubi MetaStore Upgrade HowTo +============================== + +This document describes how to upgrade the schema of a SQLite backed +Kyuubi MetaStore instance from one release version of Kyuubi to another +release version of Kyuubi. For example, by following the steps listed +below it is possible to upgrade a Kyuubi 1.8.0 MetaStore schema to a +Kyuubi 1.9.0 MetaStore schema. Before attempting this project we +strongly recommend that you read through all of the steps in this +document and familiarize yourself with the required tools. + +MetaStore Upgrade Steps +======================= + +1) Shutdown your MetaStore instance and restrict access to the + MetaStore's SQLite database. It is very important that no one else + accesses or modifies the contents of database while you are + performing the schema upgrade. + +2) Create a backup of your SQLite metastore database. This will allow + you to revert any changes made during the upgrade process if + something goes wrong. The `sqlite3` command is the easiest way to + create a backup of a SQLite database: + + % sqlite3 .db '.backup _backup.db' + +3) Dump your metastore database schema to a file. 
We use the `sqlite3` + utility again, but this time with a command line option that + specifies we are only interested in dumping the DDL statements + required to create the schema: + + % sqlite3 .db '.schema' > schema-x.y.z.sqlite.sql + +4) The schema upgrade scripts assume that the schema you are upgrading + closely matches the official schema for your particular version of + Kyuubi. The files in this directory with names like + "metadata-store-schema-x.y.z.sqlite.sql" contain dumps of the official schemas + corresponding to each of the released versions of Kyuubi. You can + determine differences between your schema and the official schema + by diffing the contents of the official dump with the schema dump + you created in the previous step. Some differences are acceptable + and will not interfere with the upgrade process, but others need to + be resolved manually or the upgrade scripts will fail to complete. + +5) You are now ready to run the schema upgrade scripts. If you are + upgrading from Kyuubi 1.8.0 to Kyuubi 1.9.0 you need to run the + upgrade-1.8.0-to-1.9.0.sqlite.sql script, but if you are upgrading + from 1.8.0 to 2.0.0 you will need to run the 1.8.0 to 1.9.0 upgrade + script followed by the 1.9.0 to 2.0.0 upgrade script. + + % sqlite3 .db + sqlite> .read upgrade-1.8.0-to-1.9.0.sqlite.sql + sqlite> .read upgrade-1.9.0-to-2.0.0.sqlite.sql + + These scripts should run to completion without any errors. If you + do encounter errors you need to analyze the cause and attempt to + trace it back to one of the preceding steps. + +6) The final step of the upgrade process is validating your freshly + upgraded schema against the official schema for your particular + version of Kyuubi. This is accomplished by repeating steps (3) and + (4), but this time comparing against the official version of the + upgraded schema, e.g. 
if you upgraded the schema to Kyuubi 1.9.0 then + you will want to compare your schema dump against the contents of + metadata-store-schema-1.9.0.sqlite.sql diff --git a/kyuubi-server/src/main/resources/sql/sqlite/metadata-store-schema-1.8.0.sqlite.sql b/kyuubi-server/src/main/resources/sql/sqlite/metadata-store-schema-1.8.0.sqlite.sql new file mode 100644 index 00000000000..656de6e5d62 --- /dev/null +++ b/kyuubi-server/src/main/resources/sql/sqlite/metadata-store-schema-1.8.0.sqlite.sql @@ -0,0 +1,36 @@ +-- the metadata table ddl + +CREATE TABLE IF NOT EXISTS metadata( + key_id INTEGER PRIMARY KEY AUTOINCREMENT, -- the auto increment key id + identifier varchar(36) NOT NULL, -- the identifier id, which is an UUID + session_type varchar(32) NOT NULL, -- the session type, SQL or BATCH + real_user varchar(255) NOT NULL, -- the real user + user_name varchar(255) NOT NULL, -- the user name, might be a proxy user + ip_address varchar(128), -- the client ip address + kyuubi_instance varchar(1024), -- the kyuubi instance that creates this + state varchar(128) NOT NULL, -- the session state + resource varchar(1024), -- the main resource + class_name varchar(1024), -- the main class name + request_name varchar(1024), -- the request name + request_conf mediumtext, -- the request config map + request_args mediumtext, -- the request arguments + create_time BIGINT NOT NULL, -- the metadata create time + engine_type varchar(32) NOT NULL, -- the engine type + cluster_manager varchar(128), -- the engine cluster manager + engine_open_time bigint, -- the engine open time + engine_id varchar(128), -- the engine application id + engine_name mediumtext, -- the engine application name + engine_url varchar(1024), -- the engine tracking url + engine_state varchar(32), -- the engine application state + engine_error mediumtext, -- the engine application diagnose + end_time bigint, -- the metadata end time + peer_instance_closed boolean default '0' -- closed by peer kyuubi instance +); + +CREATE UNIQUE INDEX IF NOT EXISTS metadata_unique_identifier_index ON metadata(identifier); + +CREATE INDEX IF NOT EXISTS metadata_user_name_index ON metadata(user_name); + +CREATE INDEX IF NOT EXISTS metadata_engine_type_index ON metadata(engine_type); + +CREATE INDEX IF NOT EXISTS metadata_create_time_index ON metadata(create_time); diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/client/KyuubiSyncThriftClient.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/client/KyuubiSyncThriftClient.scala index 12a4c824ca8..ad7191c090c 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/client/KyuubiSyncThriftClient.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/client/KyuubiSyncThriftClient.scala @@ -52,6 +52,8 @@ class KyuubiSyncThriftClient private ( @volatile private var _engineUrl: Option[String] = _ @volatile private var _engineName: Option[String] = _ + private[kyuubi] def engineConnectionClosed: Boolean = !protocol.getTransport.isOpen + private val lock = new ReentrantLock() // Visible for testing. 
@@ -59,11 +61,14 @@ class KyuubiSyncThriftClient private ( @volatile private var _aliveProbeSessionHandle: TSessionHandle = _ @volatile private var remoteEngineBroken: Boolean = false + @volatile private var clientClosedByAliveProbe: Boolean = false private val engineAliveProbeClient = engineAliveProbeProtocol.map(new TCLIService.Client(_)) private var engineAliveThreadPool: ScheduledExecutorService = _ @volatile private var engineLastAlive: Long = _ - private var asyncRequestExecutor: ExecutorService = _ + private lazy val asyncRequestExecutor: ExecutorService = + ThreadUtils.newDaemonSingleThreadScheduledExecutor( + "async-request-executor-" + SessionHandle(_remoteSessionHandle)) @VisibleForTesting @volatile private[kyuubi] var asyncRequestInterrupted: Boolean = false @@ -71,11 +76,6 @@ class KyuubiSyncThriftClient private ( @VisibleForTesting private[kyuubi] def getEngineAliveProbeProtocol: Option[TProtocol] = engineAliveProbeProtocol - private def newAsyncRequestExecutor(): ExecutorService = { - ThreadUtils.newDaemonSingleThreadScheduledExecutor( - "async-request-executor-" + _remoteSessionHandle) - } - private def shutdownAsyncRequestExecutor(): Unit = { Option(asyncRequestExecutor).filterNot(_.isShutdown).foreach(ThreadUtils.shutdown(_)) asyncRequestInterrupted = true @@ -86,7 +86,7 @@ class KyuubiSyncThriftClient private ( "engine-alive-probe-" + _aliveProbeSessionHandle) val task = new Runnable { override def run(): Unit = { - if (!remoteEngineBroken) { + if (!remoteEngineBroken && !engineConnectionClosed) { engineAliveProbeClient.foreach { client => val tGetInfoReq = new TGetInfoReq() tGetInfoReq.setSessionHandle(_aliveProbeSessionHandle) @@ -108,7 +108,19 @@ class KyuubiSyncThriftClient private ( } } } else { + warn(s"Removing Clients for ${_remoteSessionHandle}") + Seq(protocol).union(engineAliveProbeProtocol.toSeq).foreach { tProtocol => + Utils.tryLogNonFatalError { + if (tProtocol.getTransport.isOpen) { + tProtocol.getTransport.close() + } + } + } + clientClosedByAliveProbe = true shutdownAsyncRequestExecutor() + Option(engineAliveThreadPool).foreach { pool => + ThreadUtils.shutdown(pool, Duration(engineAliveProbeInterval, TimeUnit.MILLISECONDS)) + } } } } @@ -123,19 +135,16 @@ class KyuubiSyncThriftClient private ( /** * Lock every rpc call to send them sequentially */ - private def withLockAcquired[T](block: => T): T = { - try { - lock.lock() - if (!protocol.getTransport.isOpen) { - throw KyuubiSQLException.connectionDoesNotExist() - } - block - } finally lock.unlock() + private def withLockAcquired[T](block: => T): T = Utils.withLockRequired(lock) { + if (engineConnectionClosed) { + throw KyuubiSQLException.connectionDoesNotExist() + } + block } private def withLockAcquiredAsyncRequest[T](block: => T): T = withLockAcquired { - if (asyncRequestExecutor == null || asyncRequestExecutor.isShutdown) { - asyncRequestExecutor = newAsyncRequestExecutor() + if (asyncRequestExecutor.isShutdown) { + throw KyuubiSQLException.connectionDoesNotExist() } val task = asyncRequestExecutor.submit(() => { @@ -199,6 +208,7 @@ class KyuubiSyncThriftClient private ( } def closeSession(): Unit = { + if (clientClosedByAliveProbe) return try { if (_remoteSessionHandle != null) { val req = new TCloseSessionReq(_remoteSessionHandle) diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/credentials/HadoopCredentialsManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/credentials/HadoopCredentialsManager.scala index fe710e67839..b51255b716f 100644 --- 
a/kyuubi-server/src/main/scala/org/apache/kyuubi/credentials/HadoopCredentialsManager.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/credentials/HadoopCredentialsManager.scala @@ -17,13 +17,11 @@ package org.apache.kyuubi.credentials -import java.util.ServiceLoader import java.util.concurrent._ import scala.collection.JavaConverters._ import scala.collection.mutable -import scala.concurrent.Future -import scala.concurrent.Promise +import scala.concurrent.{Future, Promise} import scala.concurrent.duration.Duration import scala.util.{Failure, Success, Try} @@ -35,6 +33,7 @@ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.service.AbstractService import org.apache.kyuubi.util.{KyuubiHadoopUtils, ThreadUtils} +import org.apache.kyuubi.util.reflect.ReflectUtils._ /** * [[HadoopCredentialsManager]] manages and renews delegation tokens, which are used by SQL engines @@ -315,13 +314,10 @@ object HadoopCredentialsManager extends Logging { private val providerEnabledConfig = "kyuubi.credentials.%s.enabled" def loadProviders(kyuubiConf: KyuubiConf): Map[String, HadoopDelegationTokenProvider] = { - val loader = - ServiceLoader.load( - classOf[HadoopDelegationTokenProvider], - Utils.getContextOrKyuubiClassLoader) val providers = mutable.ArrayBuffer[HadoopDelegationTokenProvider]() - val iterator = loader.iterator + val iterator = + loadFromServiceLoader[HadoopDelegationTokenProvider](Utils.getContextOrKyuubiClassLoader) while (iterator.hasNext) { try { providers += iterator.next diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ApplicationOperation.scala index 93d495895ad..23a49c1ae5f 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ApplicationOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ApplicationOperation.scala @@ -35,30 +35,48 @@ trait ApplicationOperation { /** * Called before other method to do a quick skip * - * @param clusterManager the underlying cluster manager or just local instance + * @param appMgrInfo the application manager information */ - def isSupported(clusterManager: Option[String]): Boolean + def isSupported(appMgrInfo: ApplicationManagerInfo): Boolean /** * Kill the app/engine by the unique application tag * + * @param appMgrInfo the application manager information * @param tag the unique application tag for engine instance. * For example, * if the Hadoop Yarn is used, for spark applications, * the tag will be preset via spark.yarn.tags + * @param proxyUser the proxy user to use for executing kill commands. + * For secured YARN cluster, the Kyuubi Server's user typically + * has no permission to kill the application. Admin user or + * application owner should be used instead. * @return a message contains response describing how the kill process. * * @note For implementations, please suppress exceptions and always return KillResponse */ - def killApplicationByTag(tag: String): KillResponse + def killApplicationByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None): KillResponse /** * Get the engine/application status by the unique application tag * + * @param appMgrInfo the application manager information * @param tag the unique application tag for engine instance. 
+ * @param submitTime engine submit to resourceManager time + * @param proxyUser the proxy user to use for creating YARN client + * For secured YARN cluster, the Kyuubi Server's user may have no permission + * to operate the application. Admin user or application owner could be used + * instead. * @return [[ApplicationInfo]] */ - def getApplicationInfoByTag(tag: String): ApplicationInfo + def getApplicationInfoByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None, + submitTime: Option[Long] = None): ApplicationInfo } object ApplicationState extends Enumeration { @@ -99,6 +117,30 @@ case class ApplicationInfo( } } +object ApplicationInfo { + val NOT_FOUND: ApplicationInfo = ApplicationInfo(null, null, ApplicationState.NOT_FOUND) + val UNKNOWN: ApplicationInfo = ApplicationInfo(null, null, ApplicationState.UNKNOWN) +} + object ApplicationOperation { val NOT_FOUND = "APPLICATION_NOT_FOUND" } + +case class KubernetesInfo(context: Option[String] = None, namespace: Option[String] = None) + +case class ApplicationManagerInfo( + resourceManager: Option[String], + kubernetesInfo: KubernetesInfo = KubernetesInfo()) + +object ApplicationManagerInfo { + final val DEFAULT_KUBERNETES_NAMESPACE = "default" + + def apply( + resourceManager: Option[String], + kubernetesContext: Option[String], + kubernetesNamespace: Option[String]): ApplicationManagerInfo = { + new ApplicationManagerInfo( + resourceManager, + KubernetesInfo(kubernetesContext, kubernetesNamespace)) + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala index 84b7707e8e2..6122a6f138f 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala @@ -17,8 +17,9 @@ package org.apache.kyuubi.engine -import java.util.concurrent.TimeUnit +import java.util.concurrent.{Semaphore, TimeUnit} +import scala.collection.JavaConverters._ import scala.util.Random import com.codahale.metrics.MetricRegistry @@ -28,8 +29,9 @@ import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiSQLException, Logging, Utils} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_ENGINE_SUBMIT_TIME_KEY -import org.apache.kyuubi.engine.EngineType.{EngineType, FLINK_SQL, HIVE_SQL, JDBC, SPARK_SQL, TRINO} +import org.apache.kyuubi.engine.EngineType._ import org.apache.kyuubi.engine.ShareLevel.{CONNECTION, GROUP, SERVER, ShareLevel} +import org.apache.kyuubi.engine.chat.ChatProcessBuilder import org.apache.kyuubi.engine.flink.FlinkProcessBuilder import org.apache.kyuubi.engine.hive.HiveProcessBuilder import org.apache.kyuubi.engine.jdbc.JdbcProcessBuilder @@ -40,6 +42,8 @@ import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryClientProvider, Di import org.apache.kyuubi.metrics.MetricsConstants.{ENGINE_FAIL, ENGINE_TIMEOUT, ENGINE_TOTAL} import org.apache.kyuubi.metrics.MetricsSystem import org.apache.kyuubi.operation.log.OperationLog +import org.apache.kyuubi.plugin.GroupProvider +import org.apache.kyuubi.server.KyuubiServer /** * The description and functionality of an engine at server side @@ -51,9 +55,10 @@ import org.apache.kyuubi.operation.log.OperationLog private[kyuubi] class EngineRef( conf: KyuubiConf, user: String, - primaryGroup: String, + groupProvider: GroupProvider, engineRefId: String, - engineManager: KyuubiApplicationManager) + 
engineManager: KyuubiApplicationManager, + startupProcessSemaphore: Option[Semaphore] = None) extends Logging { // The corresponding ServerSpace where the engine belongs to private val serverSpace: String = conf.get(HA_NAMESPACE) @@ -66,7 +71,8 @@ private[kyuubi] class EngineRef( private val engineType: EngineType = EngineType.withName(conf.get(ENGINE_TYPE)) // Server-side engine pool size threshold - private val poolThreshold: Int = conf.get(ENGINE_POOL_SIZE_THRESHOLD) + private val poolThreshold: Int = Option(KyuubiServer.kyuubiServer).map(_.getConf) + .getOrElse(KyuubiConf()).get(ENGINE_POOL_SIZE_THRESHOLD) private val clientPoolSize: Int = conf.get(ENGINE_POOL_SIZE) @@ -82,10 +88,12 @@ private[kyuubi] class EngineRef( private var builder: ProcBuilder = _ + private[kyuubi] def getEngineRefId(): String = engineRefId + // Launcher of the engine private[kyuubi] val appUser: String = shareLevel match { case SERVER => Utils.currentUser - case GROUP => primaryGroup + case GROUP => groupProvider.primaryGroup(user, conf.getAll.asJava) case _ => user } @@ -187,22 +195,34 @@ private[kyuubi] class EngineRef( case TRINO => new TrinoProcessBuilder(appUser, conf, engineRefId, extraEngineLog) case HIVE_SQL => + conf.setIfMissing(HiveProcessBuilder.HIVE_ENGINE_NAME, defaultEngineName) new HiveProcessBuilder(appUser, conf, engineRefId, extraEngineLog) case JDBC => new JdbcProcessBuilder(appUser, conf, engineRefId, extraEngineLog) + case CHAT => + new ChatProcessBuilder(appUser, conf, engineRefId, extraEngineLog) } MetricsSystem.tracing(_.incCount(ENGINE_TOTAL)) + var acquiredPermit = false try { + if (!startupProcessSemaphore.forall(_.tryAcquire(timeout, TimeUnit.MILLISECONDS))) { + MetricsSystem.tracing(_.incCount(MetricRegistry.name(ENGINE_TIMEOUT, appUser))) + throw KyuubiSQLException( + s"Timeout($timeout ms, you can modify ${ENGINE_INIT_TIMEOUT.key} to change it) to" + + s" acquires a permit from engine builder semaphore.") + } + acquiredPermit = true val redactedCmd = builder.toString info(s"Launching engine:\n$redactedCmd") builder.validateConf val process = builder.start var exitValue: Option[Int] = None + var lastApplicationInfo: Option[ApplicationInfo] = None while (engineRef.isEmpty) { if (exitValue.isEmpty && process.waitFor(1, TimeUnit.SECONDS)) { exitValue = Some(process.exitValue()) - if (exitValue.get != 0) { + if (exitValue != Some(0)) { val error = builder.getError MetricsSystem.tracing { ms => ms.incCount(MetricRegistry.name(ENGINE_FAIL, appUser)) @@ -212,11 +232,33 @@ private[kyuubi] class EngineRef( } } + if (started + timeout <= System.currentTimeMillis()) { + val killMessage = + engineManager.killApplication(builder.appMgrInfo(), engineRefId, Some(appUser)) + builder.close(true) + MetricsSystem.tracing(_.incCount(MetricRegistry.name(ENGINE_TIMEOUT, appUser))) + throw KyuubiSQLException( + s"Timeout($timeout ms, you can modify ${ENGINE_INIT_TIMEOUT.key} to change it) to" + + s" launched $engineType engine with $redactedCmd. 
$killMessage", + builder.getError) + } + engineRef = discoveryClient.getEngineByRefId(engineSpace, engineRefId) + // even the submit process succeeds, the application might meet failure when initializing, // check the engine application state from engine manager and fast fail on engine terminate - if (exitValue == Some(0)) { + if (engineRef.isEmpty && exitValue == Some(0)) { Option(engineManager).foreach { engineMgr => - engineMgr.getApplicationInfo(builder.clusterManager(), engineRefId).foreach { appInfo => + if (lastApplicationInfo.isDefined) { + TimeUnit.SECONDS.sleep(1) + } + + val applicationInfo = engineMgr.getApplicationInfo( + builder.appMgrInfo(), + engineRefId, + Some(appUser), + Some(started)) + + applicationInfo.foreach { appInfo => if (ApplicationState.isTerminated(appInfo.state)) { MetricsSystem.tracing { ms => ms.incCount(MetricRegistry.name(ENGINE_FAIL, appUser)) @@ -230,25 +272,23 @@ private[kyuubi] class EngineRef( builder.getError) } } - } - } - if (started + timeout <= System.currentTimeMillis()) { - val killMessage = engineManager.killApplication(builder.clusterManager(), engineRefId) - process.destroyForcibly() - MetricsSystem.tracing(_.incCount(MetricRegistry.name(ENGINE_TIMEOUT, appUser))) - throw KyuubiSQLException( - s"Timeout($timeout ms, you can modify ${ENGINE_INIT_TIMEOUT.key} to change it) to" + - s" launched $engineType engine with $redactedCmd. $killMessage", - builder.getError) + lastApplicationInfo = applicationInfo + } } - engineRef = discoveryClient.getEngineByRefId(engineSpace, engineRefId) } engineRef.get } finally { + if (acquiredPermit) startupProcessSemaphore.foreach(_.release()) + val waitCompletion = conf.get(KyuubiConf.SESSION_ENGINE_STARTUP_WAIT_COMPLETION) + val destroyProcess = !waitCompletion && builder.isClusterMode() + if (destroyProcess) { + info("Destroy the builder process because waitCompletion is false" + + " and the engine is running in cluster mode.") + } // we must close the process builder whether session open is success or failure since // we have a log capture thread in process builder. 
- builder.close() + builder.close(destroyProcess) } } @@ -270,9 +310,9 @@ private[kyuubi] class EngineRef( def close(): Unit = { if (shareLevel == CONNECTION && builder != null) { try { - val clusterManager = builder.clusterManager() + val appMgrInfo = builder.appMgrInfo() builder.close(true) - engineManager.killApplication(clusterManager, engineRefId) + engineManager.killApplication(appMgrInfo, engineRefId, Some(appUser)) } catch { case e: Exception => warn(s"Error closing engine builder, engineRefId: $engineRefId", e) diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/JpsApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/JpsApplicationOperation.scala index bd482b86bf5..1d0d58d167c 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/JpsApplicationOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/JpsApplicationOperation.scala @@ -41,8 +41,9 @@ class JpsApplicationOperation extends ApplicationOperation { } } - override def isSupported(clusterManager: Option[String]): Boolean = { - runner != null && (clusterManager.isEmpty || clusterManager.get == "local") + override def isSupported(appMgrInfo: ApplicationManagerInfo): Boolean = { + runner != null && + (appMgrInfo.resourceManager.isEmpty || appMgrInfo.resourceManager.get == "local") } private def getEngine(tag: String): Option[String] = { @@ -80,11 +81,18 @@ class JpsApplicationOperation extends ApplicationOperation { } } - override def killApplicationByTag(tag: String): KillResponse = { + override def killApplicationByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None): KillResponse = { killJpsApplicationByTag(tag, true) } - override def getApplicationInfoByTag(tag: String): ApplicationInfo = { + override def getApplicationInfoByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None, + submitTime: Option[Long] = None): ApplicationInfo = { val commandOption = getEngine(tag) if (commandOption.nonEmpty) { val idAndCmd = commandOption.get diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationAuditLogger.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationAuditLogger.scala new file mode 100644 index 00000000000..731b9d7b5ba --- /dev/null +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationAuditLogger.scala @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine + +import io.fabric8.kubernetes.api.model.Pod + +import org.apache.kyuubi.Logging +import org.apache.kyuubi.engine.KubernetesApplicationOperation.{toApplicationState, LABEL_KYUUBI_UNIQUE_KEY, SPARK_APP_ID_LABEL} + +object KubernetesApplicationAuditLogger extends Logging { + final private val AUDIT_BUFFER = new ThreadLocal[StringBuilder]() { + override protected def initialValue: StringBuilder = new StringBuilder() + } + + def audit(kubernetesInfo: KubernetesInfo, pod: Pod): Unit = { + val sb = AUDIT_BUFFER.get() + sb.setLength(0) + sb.append(s"label=${pod.getMetadata.getLabels.get(LABEL_KYUUBI_UNIQUE_KEY)}").append("\t") + sb.append(s"context=${kubernetesInfo.context.orNull}").append("\t") + sb.append(s"namespace=${kubernetesInfo.namespace.orNull}").append("\t") + sb.append(s"pod=${pod.getMetadata.getName}").append("\t") + sb.append(s"appId=${pod.getMetadata.getLabels.get(SPARK_APP_ID_LABEL)}").append("\t") + sb.append(s"appState=${toApplicationState(pod.getStatus.getPhase)}") + info(sb.toString()) + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala index bee69b11762..16a0c29d149 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala @@ -17,122 +17,253 @@ package org.apache.kyuubi.engine -import io.fabric8.kubernetes.api.model.{Pod, PodList} +import java.util.Locale +import java.util.concurrent.{ConcurrentHashMap, TimeUnit} + +import scala.collection.JavaConverters._ + +import com.google.common.cache.{Cache, CacheBuilder, RemovalNotification} +import io.fabric8.kubernetes.api.model.Pod import io.fabric8.kubernetes.client.KubernetesClient -import io.fabric8.kubernetes.client.dsl.FilterWatchListDeletable +import io.fabric8.kubernetes.client.informers.{ResourceEventHandler, SharedIndexInformer} -import org.apache.kyuubi.Logging +import org.apache.kyuubi.{KyuubiException, Logging, Utils} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.ApplicationState.{ApplicationState, FAILED, FINISHED, PENDING, RUNNING, UNKNOWN} -import org.apache.kyuubi.engine.KubernetesApplicationOperation.{toApplicationState, SPARK_APP_ID_LABEL} +import org.apache.kyuubi.engine.ApplicationState.{isTerminated, ApplicationState, FAILED, FINISHED, NOT_FOUND, PENDING, RUNNING, UNKNOWN} +import org.apache.kyuubi.engine.KubernetesApplicationOperation.{toApplicationState, toLabel, LABEL_KYUUBI_UNIQUE_KEY, SPARK_APP_ID_LABEL} import org.apache.kyuubi.util.KubernetesUtils class KubernetesApplicationOperation extends ApplicationOperation with Logging { - @volatile - private var kubernetesClient: KubernetesClient = _ - private var jpsOperation: JpsApplicationOperation = _ + private val kubernetesClients: ConcurrentHashMap[KubernetesInfo, KubernetesClient] = + new ConcurrentHashMap[KubernetesInfo, KubernetesClient] + private val enginePodInformers: ConcurrentHashMap[KubernetesInfo, SharedIndexInformer[Pod]] = + new ConcurrentHashMap[KubernetesInfo, SharedIndexInformer[Pod]] - override def initialize(conf: KyuubiConf): Unit = { - jpsOperation = new JpsApplicationOperation - jpsOperation.initialize(conf) + private var submitTimeout: Long = _ + private var kyuubiConf: KyuubiConf = _ + + private def allowedContexts: Set[String] = + 
kyuubiConf.get(KyuubiConf.KUBERNETES_CONTEXT_ALLOW_LIST) + private def allowedNamespaces: Set[String] = + kyuubiConf.get(KyuubiConf.KUBERNETES_NAMESPACE_ALLOW_LIST) + + // key is kyuubi_unique_key + private val appInfoStore: ConcurrentHashMap[String, ApplicationInfo] = + new ConcurrentHashMap[String, ApplicationInfo] + // key is kyuubi_unique_key + private var cleanupTerminatedAppInfoTrigger: Cache[String, ApplicationState] = _ + + private def getOrCreateKubernetesClient(kubernetesInfo: KubernetesInfo): KubernetesClient = { + checkKubernetesInfo(kubernetesInfo) + kubernetesClients.computeIfAbsent(kubernetesInfo, kInfo => buildKubernetesClient(kInfo)) + } + + // Visible for testing + private[engine] def checkKubernetesInfo(kubernetesInfo: KubernetesInfo): Unit = { + val context = kubernetesInfo.context + val namespace = kubernetesInfo.namespace + + if (allowedContexts.nonEmpty && context.exists(!allowedContexts.contains(_))) { + throw new KyuubiException( + s"Kubernetes context $context is not in the allowed list[$allowedContexts]") + } + + if (allowedNamespaces.nonEmpty && namespace.exists(!allowedNamespaces.contains(_))) { + throw new KyuubiException( + s"Kubernetes namespace $namespace is not in the allowed list[$allowedNamespaces]") + } + } - info("Start initializing Kubernetes Client.") - kubernetesClient = KubernetesUtils.buildKubernetesClient(conf) match { + private def buildKubernetesClient(kubernetesInfo: KubernetesInfo): KubernetesClient = { + val kubernetesConf = + kyuubiConf.getKubernetesConf(kubernetesInfo.context, kubernetesInfo.namespace) + KubernetesUtils.buildKubernetesClient(kubernetesConf) match { case Some(client) => - info(s"Initialized Kubernetes Client connect to: ${client.getMasterUrl}") + info(s"[$kubernetesInfo] Initialized Kubernetes Client connect to: ${client.getMasterUrl}") + val enginePodInformer = client.pods() + .withLabel(LABEL_KYUUBI_UNIQUE_KEY) + .inform(new SparkEnginePodEventHandler(kubernetesInfo)) + info(s"[$kubernetesInfo] Start Kubernetes Client Informer.") + enginePodInformers.put(kubernetesInfo, enginePodInformer) client - case None => - warn("Fail to init Kubernetes Client for Kubernetes Application Operation") - null + + case None => throw new KyuubiException(s"Fail to build Kubernetes client for $kubernetesInfo") } } - override def isSupported(clusterManager: Option[String]): Boolean = { - kubernetesClient != null && clusterManager.nonEmpty && - clusterManager.get.toLowerCase.startsWith("k8s") + override def initialize(conf: KyuubiConf): Unit = { + kyuubiConf = conf + info("Start initializing Kubernetes application operation.") + submitTimeout = conf.get(KyuubiConf.ENGINE_KUBERNETES_SUBMIT_TIMEOUT) + // Defer cleaning terminated application information + val retainPeriod = conf.get(KyuubiConf.KUBERNETES_TERMINATED_APPLICATION_RETAIN_PERIOD) + cleanupTerminatedAppInfoTrigger = CacheBuilder.newBuilder() + .expireAfterWrite(retainPeriod, TimeUnit.MILLISECONDS) + .removalListener((notification: RemovalNotification[String, ApplicationState]) => { + Option(appInfoStore.remove(notification.getKey)).foreach { removed => + info(s"Remove terminated application ${removed.id} with " + + s"[${toLabel(notification.getKey)}, state: ${removed.state}]") + } + }) + .build() + } + + override def isSupported(appMgrInfo: ApplicationManagerInfo): Boolean = { + // TODO add deploy mode to check whether is supported + kyuubiConf != null && + appMgrInfo.resourceManager.exists(_.toLowerCase(Locale.ROOT).startsWith("k8s")) } - override def killApplicationByTag(tag: 
String): KillResponse = { - if (kubernetesClient != null) { - debug(s"Deleting application info from Kubernetes cluster by $tag tag") - try { - // Need driver only - val operation = findDriverPodByTag(tag) - val podList = operation.list().getItems - if (podList.size() != 0) { - toApplicationState(podList.get(0).getStatus.getPhase) match { - case FAILED | UNKNOWN => + override def killApplicationByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None): KillResponse = { + if (kyuubiConf == null) { + throw new IllegalStateException("Methods initialize and isSupported must be called ahead") + } + val kubernetesInfo = appMgrInfo.kubernetesInfo + val kubernetesClient = getOrCreateKubernetesClient(kubernetesInfo) + debug(s"[$kubernetesInfo] Deleting application[${toLabel(tag)}]'s info from Kubernetes cluster") + try { + Option(appInfoStore.get(tag)) match { + case Some(info) => + debug(s"Application[${toLabel(tag)}] is in ${info.state} state") + info.state match { + case NOT_FOUND | FAILED | UNKNOWN => ( false, - s"Target Pod ${podList.get(0).getMetadata.getName} is in FAILED or UNKNOWN status") + s"[$kubernetesInfo] Target application[${toLabel(tag)}] is in ${info.state} state") case _ => ( - operation.delete(), - s"Operation of deleted appId: ${podList.get(0).getMetadata.getName} is completed") + !kubernetesClient.pods.withName(info.name).delete().isEmpty, + s"[$kubernetesInfo] Operation of deleted" + + s" application[appId: ${info.id}, ${toLabel(tag)}] is completed") } - } else { - // client mode - jpsOperation.killApplicationByTag(tag) - } - } catch { - case e: Exception => - (false, s"Failed to terminate application with $tag, due to ${e.getMessage}") + case None => + warn(s"No application info found, trying to delete pod with ${toLabel(tag)}") + ( + !kubernetesClient.pods.withLabel(LABEL_KYUUBI_UNIQUE_KEY, tag).delete().isEmpty, + s"[$kubernetesInfo] Operation of deleted pod with ${toLabel(tag)} is completed") } - } else { - throw new IllegalStateException("Methods initialize and isSupported must be called ahead") + } catch { + case e: Exception => + ( + false, + s"[$kubernetesInfo] Failed to terminate application[${toLabel(tag)}], " + + s"due to ${e.getMessage}") } } - override def getApplicationInfoByTag(tag: String): ApplicationInfo = { - if (kubernetesClient != null) { - debug(s"Getting application info from Kubernetes cluster by $tag tag") - try { - val operation = findDriverPodByTag(tag) - val podList = operation.list().getItems - if (podList.size() != 0) { - val pod = podList.get(0) - val info = ApplicationInfo( - // spark pods always tag label `spark-app-selector:` - id = pod.getMetadata.getLabels.get(SPARK_APP_ID_LABEL), - name = pod.getMetadata.getName, - state = KubernetesApplicationOperation.toApplicationState(pod.getStatus.getPhase), - error = Option(pod.getStatus.getReason)) - debug(s"Successfully got application info by $tag: $info") - info - } else { - // client mode - jpsOperation.getApplicationInfoByTag(tag) - } - } catch { - case e: Exception => - error(s"Failed to get application with $tag, due to ${e.getMessage}") - ApplicationInfo(id = null, name = null, ApplicationState.NOT_FOUND) - } - } else { + override def getApplicationInfoByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None, + submitTime: Option[Long] = None): ApplicationInfo = { + if (kyuubiConf == null) { throw new IllegalStateException("Methods initialize and isSupported must be called ahead") } + debug(s"Getting 
application[${toLabel(tag)}]'s info from Kubernetes cluster") + try { + // need to initialize the kubernetes client if not exists + getOrCreateKubernetesClient(appMgrInfo.kubernetesInfo) + val appInfo = appInfoStore.getOrDefault(tag, ApplicationInfo.NOT_FOUND) + (appInfo.state, submitTime) match { + // Kyuubi should wait second if pod is not be created + case (NOT_FOUND, Some(_submitTime)) => + val elapsedTime = System.currentTimeMillis - _submitTime + if (elapsedTime > submitTimeout) { + error(s"Can't find target driver pod by ${toLabel(tag)}, " + + s"elapsed time: ${elapsedTime}ms exceeds ${submitTimeout}ms.") + ApplicationInfo.NOT_FOUND + } else { + warn(s"Waiting for driver pod with ${toLabel(tag)} to be created, " + + s"elapsed time: ${elapsedTime}ms, return UNKNOWN status") + ApplicationInfo.UNKNOWN + } + case (NOT_FOUND, None) => + ApplicationInfo.NOT_FOUND + case _ => + debug(s"Successfully got application[${toLabel(tag)}]'s info: $appInfo") + appInfo + } + } catch { + case e: Exception => + error(s"Failed to get application by ${toLabel(tag)}, due to ${e.getMessage}") + ApplicationInfo.NOT_FOUND + } } - private def findDriverPodByTag(tag: String): FilterWatchListDeletable[Pod, PodList] = { - val operation = kubernetesClient.pods() - .withLabel(KubernetesApplicationOperation.LABEL_KYUUBI_UNIQUE_KEY, tag) - val size = operation.list().getItems.size() - if (size != 1) { - warn(s"Get Tag: ${tag} Driver Pod In Kubernetes size: ${size}, we expect 1") + override def stop(): Unit = { + enginePodInformers.asScala.foreach { case (_, informer) => + Utils.tryLogNonFatalError(informer.stop()) + } + enginePodInformers.clear() + + kubernetesClients.asScala.foreach { case (_, client) => + Utils.tryLogNonFatalError(client.close()) + } + kubernetesClients.clear() + + if (cleanupTerminatedAppInfoTrigger != null) { + cleanupTerminatedAppInfoTrigger.cleanUp() + cleanupTerminatedAppInfoTrigger = null } - operation } - override def stop(): Unit = { - if (kubernetesClient != null) { - try { - kubernetesClient.close() - } catch { - case e: Exception => error(e.getMessage) + private class SparkEnginePodEventHandler(kubernetesInfo: KubernetesInfo) + extends ResourceEventHandler[Pod] { + + override def onAdd(pod: Pod): Unit = { + if (isSparkEnginePod(pod)) { + updateApplicationState(pod) + KubernetesApplicationAuditLogger.audit(kubernetesInfo, pod) + } + } + + override def onUpdate(oldPod: Pod, newPod: Pod): Unit = { + if (isSparkEnginePod(newPod)) { + updateApplicationState(newPod) + val appState = toApplicationState(newPod.getStatus.getPhase) + if (isTerminated(appState)) { + markApplicationTerminated(newPod) + } + KubernetesApplicationAuditLogger.audit(kubernetesInfo, newPod) + } + } + + override def onDelete(pod: Pod, deletedFinalStateUnknown: Boolean): Unit = { + if (isSparkEnginePod(pod)) { + updateApplicationState(pod) + markApplicationTerminated(pod) + KubernetesApplicationAuditLogger.audit(kubernetesInfo, pod) } } } + + private def isSparkEnginePod(pod: Pod): Boolean = { + val labels = pod.getMetadata.getLabels + labels.containsKey(LABEL_KYUUBI_UNIQUE_KEY) && labels.containsKey(SPARK_APP_ID_LABEL) + } + + private def updateApplicationState(pod: Pod): Unit = { + val appState = toApplicationState(pod.getStatus.getPhase) + debug(s"Driver Informer changes pod: ${pod.getMetadata.getName} to state: $appState") + appInfoStore.put( + pod.getMetadata.getLabels.get(LABEL_KYUUBI_UNIQUE_KEY), + ApplicationInfo( + id = pod.getMetadata.getLabels.get(SPARK_APP_ID_LABEL), + name = pod.getMetadata.getName, + state = 
appState, + error = Option(pod.getStatus.getReason))) + } + + private def markApplicationTerminated(pod: Pod): Unit = synchronized { + val key = pod.getMetadata.getLabels.get(LABEL_KYUUBI_UNIQUE_KEY) + if (cleanupTerminatedAppInfoTrigger.getIfPresent(key) == null) { + cleanupTerminatedAppInfoTrigger.put(key, toApplicationState(pod.getStatus.getPhase)) + } + } } object KubernetesApplicationOperation extends Logging { @@ -141,6 +272,8 @@ object KubernetesApplicationOperation extends Logging { val KUBERNETES_SERVICE_HOST = "KUBERNETES_SERVICE_HOST" val KUBERNETES_SERVICE_PORT = "KUBERNETES_SERVICE_PORT" + def toLabel(tag: String): String = s"label: $LABEL_KYUUBI_UNIQUE_KEY=$tag" + def toApplicationState(state: String): ApplicationState = state match { // https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/types.go#L2396 // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/ @@ -148,10 +281,10 @@ object KubernetesApplicationOperation extends Logging { case "Running" => RUNNING case "Succeeded" => FINISHED case "Failed" | "Error" => FAILED - case "Unknown" => ApplicationState.UNKNOWN + case "Unknown" => UNKNOWN case _ => warn(s"The kubernetes driver pod state: $state is not supported, " + "mark the application state as UNKNOWN.") - ApplicationState.UNKNOWN + UNKNOWN } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala index 70c13001262..f8b64005359 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala @@ -20,9 +20,8 @@ package org.apache.kyuubi.engine import java.io.File import java.net.{URI, URISyntaxException} import java.nio.file.{Files, Path} -import java.util.{Locale, ServiceLoader} +import java.util.Locale -import scala.collection.JavaConverters._ import scala.util.control.NonFatal import org.apache.kyuubi.{KyuubiException, Utils} @@ -31,14 +30,13 @@ import org.apache.kyuubi.engine.KubernetesApplicationOperation.LABEL_KYUUBI_UNIQ import org.apache.kyuubi.engine.flink.FlinkProcessBuilder import org.apache.kyuubi.engine.spark.SparkProcessBuilder import org.apache.kyuubi.service.AbstractService +import org.apache.kyuubi.util.reflect.ReflectUtils._ class KyuubiApplicationManager extends AbstractService("KyuubiApplicationManager") { // TODO: maybe add a configuration is better - private val operations = { - ServiceLoader.load(classOf[ApplicationOperation], Utils.getContextOrKyuubiClassLoader) - .iterator().asScala.toSeq - } + private val operations = + loadFromServiceLoader[ApplicationOperation](Utils.getContextOrKyuubiClassLoader).toSeq override def initialize(conf: KyuubiConf): Unit = { operations.foreach { op => @@ -62,11 +60,14 @@ class KyuubiApplicationManager extends AbstractService("KyuubiApplicationManager super.stop() } - def killApplication(resourceManager: Option[String], tag: String): KillResponse = { + def killApplication( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None): KillResponse = { var (killed, lastMessage): KillResponse = (false, null) for (operation <- operations if !killed) { - if (operation.isSupported(resourceManager)) { - val (k, m) = operation.killApplicationByTag(tag) + if (operation.isSupported(appMgrInfo)) { + val (k, m) = operation.killApplicationByTag(appMgrInfo, tag, proxyUser) killed = k lastMessage = m } @@ -75,7 +76,7 @@ 
class KyuubiApplicationManager extends AbstractService("KyuubiApplicationManager val finalMessage = if (lastMessage == null) { s"No ${classOf[ApplicationOperation]} Service found in ServiceLoader" + - s" for $resourceManager" + s" for $appMgrInfo" } else { lastMessage } @@ -83,11 +84,13 @@ class KyuubiApplicationManager extends AbstractService("KyuubiApplicationManager } def getApplicationInfo( - clusterManager: Option[String], - tag: String): Option[ApplicationInfo] = { - val operation = operations.find(_.isSupported(clusterManager)) + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None, + submitTime: Option[Long] = None): Option[ApplicationInfo] = { + val operation = operations.find(_.isSupported(appMgrInfo)) operation match { - case Some(op) => Some(op.getApplicationInfoByTag(tag)) + case Some(op) => Some(op.getApplicationInfoByTag(appMgrInfo, tag, proxyUser, submitTime)) case None => None } } @@ -104,10 +107,10 @@ object KyuubiApplicationManager { conf.set("spark.kubernetes.driver.label." + LABEL_KYUUBI_UNIQUE_KEY, tag) } - private def setupFlinkK8sTag(tag: String, conf: KyuubiConf): Unit = { - val originalTag = conf.getOption(FlinkProcessBuilder.TAG_KEY).map(_ + ",").getOrElse("") + private def setupFlinkYarnTag(tag: String, conf: KyuubiConf): Unit = { + val originalTag = conf.getOption(FlinkProcessBuilder.YARN_TAG_KEY).map(_ + ",").getOrElse("") val newTag = s"${originalTag}KYUUBI" + Some(tag).filterNot(_.isEmpty).map("," + _).getOrElse("") - conf.set(FlinkProcessBuilder.TAG_KEY, newTag) + conf.set(FlinkProcessBuilder.YARN_TAG_KEY, newTag) } val uploadWorkDir: Path = { @@ -175,9 +178,9 @@ object KyuubiApplicationManager { // if the master is not identified ahead, add all tags setupSparkYarnTag(applicationTag, conf) setupSparkK8sTag(applicationTag, conf) - case ("FLINK", _) => + case ("FLINK", Some("YARN")) => // running flink on other platforms is not yet supported - setupFlinkK8sTag(applicationTag, conf) + setupFlinkYarnTag(applicationTag, conf) // other engine types are running locally yet case _ => } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ProcBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ProcBuilder.scala index 4c7330b4dd5..44b317c71ea 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ProcBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/ProcBuilder.scala @@ -155,7 +155,10 @@ trait ProcBuilder { @volatile private var error: Throwable = UNCAUGHT_ERROR private val engineLogMaxLines = conf.get(KyuubiConf.SESSION_ENGINE_STARTUP_MAX_LOG_LINES) - private val waitCompletion = conf.get(KyuubiConf.SESSION_ENGINE_STARTUP_WAIT_COMPLETION) + + private val engineStartupDestroyTimeout = + conf.get(KyuubiConf.SESSION_ENGINE_STARTUP_DESTROY_TIMEOUT) + protected val lastRowsOfLog: EvictingQueue[String] = EvictingQueue.create(engineLogMaxLines) // Visible for test @volatile private[kyuubi] var logCaptureThreadReleased: Boolean = true @@ -249,14 +252,15 @@ trait ProcBuilder { process } - def close(destroyProcess: Boolean = !waitCompletion): Unit = synchronized { + def isClusterMode(): Boolean = false + + def close(destroyProcess: Boolean): Unit = synchronized { if (logCaptureThread != null) { logCaptureThread.interrupt() logCaptureThread = null } if (destroyProcess && process != null) { - info("Destroy the process, since waitCompletion is false.") - process.destroyForcibly() + Utils.terminateProcess(process, engineStartupDestroyTimeout) process = null } } @@ -336,15 +340,18 
@@ trait ProcBuilder { protected def validateEnv(requiredEnv: String): Throwable = { KyuubiSQLException(s"$requiredEnv is not set! For more information on installing and " + s"configuring $requiredEnv, please visit https://kyuubi.readthedocs.io/en/master/" + - s"deployment/settings.html#environments") + s"configuration/settings.html#environments") } def clusterManager(): Option[String] = None + def appMgrInfo(): ApplicationManagerInfo = ApplicationManagerInfo(None) } object ProcBuilder extends Logging { private val PROC_BUILD_LOGGER = new NamedThreadFactory("process-logger-capture", daemon = true) private val UNCAUGHT_ERROR = new RuntimeException("Uncaught error") + + private[engine] val KYUUBI_ENGINE_LOG_PATH_KEY = "kyuubi.engine.engineLog.path" } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala index b38b1daa222..1f672ad701e 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala @@ -17,13 +17,18 @@ package org.apache.kyuubi.engine +import java.util.Locale + import scala.collection.JavaConverters._ +import org.apache.hadoop.conf.Configuration import org.apache.hadoop.yarn.api.records.{FinalApplicationStatus, YarnApplicationState} import org.apache.hadoop.yarn.client.api.YarnClient -import org.apache.kyuubi.Logging +import org.apache.kyuubi.{Logging, Utils} import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.YarnUserStrategy +import org.apache.kyuubi.config.KyuubiConf.YarnUserStrategy._ import org.apache.kyuubi.engine.ApplicationOperation._ import org.apache.kyuubi.engine.ApplicationState.ApplicationState import org.apache.kyuubi.engine.YarnApplicationOperation.toApplicationState @@ -31,85 +36,136 @@ import org.apache.kyuubi.util.KyuubiHadoopUtils class YarnApplicationOperation extends ApplicationOperation with Logging { - @volatile private var yarnClient: YarnClient = _ + private var yarnConf: Configuration = _ + @volatile private var adminYarnClient: Option[YarnClient] = None + private var submitTimeout: Long = _ override def initialize(conf: KyuubiConf): Unit = { - val yarnConf = KyuubiHadoopUtils.newYarnConfiguration(conf) - // YarnClient is thread-safe - val c = YarnClient.createYarnClient() - c.init(yarnConf) - c.start() - yarnClient = c - info(s"Successfully initialized yarn client: ${c.getServiceState}") + submitTimeout = conf.get(KyuubiConf.ENGINE_YARN_SUBMIT_TIMEOUT) + yarnConf = KyuubiHadoopUtils.newYarnConfiguration(conf) + + def createYarnClientWithCurrentUser(): Unit = { + val c = createYarnClient(yarnConf) + info(s"Creating admin YARN client with current user: ${Utils.currentUser}.") + adminYarnClient = Some(c) + } + + def createYarnClientWithProxyUser(proxyUser: String): Unit = Utils.doAs(proxyUser) { () => + val c = createYarnClient(yarnConf) + info(s"Creating admin YARN client with proxy user: $proxyUser.") + adminYarnClient = Some(c) + } + + YarnUserStrategy.withName(conf.get(KyuubiConf.YARN_USER_STRATEGY)) match { + case NONE => + createYarnClientWithCurrentUser() + case ADMIN if conf.get(KyuubiConf.YARN_USER_ADMIN) == Utils.currentUser => + createYarnClientWithCurrentUser() + case ADMIN => + createYarnClientWithProxyUser(conf.get(KyuubiConf.YARN_USER_ADMIN)) + case OWNER => + info("Skip initializing admin YARN client") + } } - override def isSupported(clusterManager: 
Option[String]): Boolean = { - yarnClient != null && clusterManager.nonEmpty && "yarn".equalsIgnoreCase(clusterManager.get) + private def createYarnClient(_yarnConf: Configuration): YarnClient = { + // YarnClient is thread-safe + val yarnClient = YarnClient.createYarnClient() + yarnClient.init(_yarnConf) + yarnClient.start() + yarnClient } - override def killApplicationByTag(tag: String): KillResponse = { - if (yarnClient != null) { - try { - val reports = yarnClient.getApplications(null, null, Set(tag).asJava) - if (reports.isEmpty) { - (false, NOT_FOUND) - } else { + private def withYarnClient[T](proxyUser: Option[String])(action: YarnClient => T): T = { + (adminYarnClient, proxyUser) match { + case (Some(yarnClient), _) => + action(yarnClient) + case (None, Some(user)) => + Utils.doAs(user) { () => + var yarnClient: YarnClient = null try { - val applicationId = reports.get(0).getApplicationId - yarnClient.killApplication(applicationId) - (true, s"Succeeded to terminate: $applicationId with $tag") - } catch { - case e: Exception => - (false, s"Failed to terminate application with $tag, due to ${e.getMessage}") + yarnClient = createYarnClient(yarnConf) + action(yarnClient) + } finally { + Utils.tryLogNonFatalError(yarnClient.close()) } } - } catch { - case e: Exception => - ( - false, - s"Failed to get while terminating application with tag $tag," + - s" due to ${e.getMessage}") - } - } else { - throw new IllegalStateException("Methods initialize and isSupported must be called ahead") + case (None, None) => + throw new IllegalStateException("Methods initialize and isSupported must be called ahead") } } - override def getApplicationInfoByTag(tag: String): ApplicationInfo = { - if (yarnClient != null) { - debug(s"Getting application info from Yarn cluster by $tag tag") + override def isSupported(appMgrInfo: ApplicationManagerInfo): Boolean = + appMgrInfo.resourceManager.exists(_.toLowerCase(Locale.ROOT).startsWith("yarn")) + + override def killApplicationByTag( + appMgrInfo: ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None): KillResponse = withYarnClient(proxyUser) { yarnClient => + try { val reports = yarnClient.getApplications(null, null, Set(tag).asJava) if (reports.isEmpty) { - debug(s"Application with tag $tag not found") - ApplicationInfo(id = null, name = null, state = ApplicationState.NOT_FOUND) + (false, NOT_FOUND) } else { - val report = reports.get(0) - val info = ApplicationInfo( - id = report.getApplicationId.toString, - name = report.getName, - state = toApplicationState( - report.getApplicationId.toString, - report.getYarnApplicationState, - report.getFinalApplicationStatus), - url = Option(report.getTrackingUrl), - error = Option(report.getDiagnostics)) - debug(s"Successfully got application info by $tag: $info") - info + try { + val applicationId = reports.get(0).getApplicationId + yarnClient.killApplication(applicationId) + (true, s"Succeeded to terminate: $applicationId with $tag") + } catch { + case e: Exception => + (false, s"Failed to terminate application with $tag, due to ${e.getMessage}") + } } - } else { - throw new IllegalStateException("Methods initialize and isSupported must be called ahead") + } catch { + case e: Exception => + ( + false, + s"Failed to get while terminating application with tag $tag, due to ${e.getMessage}") } } - override def stop(): Unit = { - if (yarnClient != null) { - try { - yarnClient.stop() - } catch { - case e: Exception => error(e.getMessage) + override def getApplicationInfoByTag( + appMgrInfo: 
ApplicationManagerInfo, + tag: String, + proxyUser: Option[String] = None, + submitTime: Option[Long] = None): ApplicationInfo = withYarnClient(proxyUser) { yarnClient => + debug(s"Getting application info from Yarn cluster by $tag tag") + val reports = yarnClient.getApplications(null, null, Set(tag).asJava) + if (reports.isEmpty) { + debug(s"Application with tag $tag not found") + submitTime match { + case Some(_submitTime) => + val elapsedTime = System.currentTimeMillis - _submitTime + if (elapsedTime > submitTimeout) { + error(s"Can't find target yarn application by tag: $tag, " + + s"elapsed time: ${elapsedTime}ms exceeds ${submitTimeout}ms.") + ApplicationInfo.NOT_FOUND + } else { + warn("Wait for yarn application to be submitted, " + + s"elapsed time: ${elapsedTime}ms, return UNKNOWN status") + ApplicationInfo.UNKNOWN + } + case _ => ApplicationInfo.NOT_FOUND } + } else { + val report = reports.get(0) + val info = ApplicationInfo( + id = report.getApplicationId.toString, + name = report.getName, + state = toApplicationState( + report.getApplicationId.toString, + report.getYarnApplicationState, + report.getFinalApplicationStatus), + url = Option(report.getTrackingUrl), + error = Option(report.getDiagnostics)) + debug(s"Successfully got application info by $tag: $info") + info } } + + override def stop(): Unit = adminYarnClient.foreach { yarnClient => + Utils.tryLogNonFatalError(yarnClient.stop()) + } } object YarnApplicationOperation extends Logging { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/chat/ChatProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/chat/ChatProcessBuilder.scala new file mode 100644 index 00000000000..3e4a20de373 --- /dev/null +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/chat/ChatProcessBuilder.scala @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.engine.chat + +import java.io.File +import java.nio.file.{Files, Paths} +import java.util + +import scala.collection.JavaConverters._ +import scala.collection.mutable.ArrayBuffer + +import com.google.common.annotations.VisibleForTesting + +import org.apache.kyuubi.{Logging, SCALA_COMPILE_VERSION, Utils} +import org.apache.kyuubi.Utils.REDACTION_REPLACEMENT_TEXT +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY +import org.apache.kyuubi.engine.ProcBuilder +import org.apache.kyuubi.operation.log.OperationLog + +class ChatProcessBuilder( + override val proxyUser: String, + override val conf: KyuubiConf, + val engineRefId: String, + val extraEngineLog: Option[OperationLog] = None) + extends ProcBuilder with Logging { + + @VisibleForTesting + def this(proxyUser: String, conf: KyuubiConf) { + this(proxyUser, conf, "") + } + + /** + * The short name of the engine process builder, we use this for form the engine jar paths now + * see `mainResource` + */ + override def shortName: String = "chat" + + override protected def module: String = "kyuubi-chat-engine" + + /** + * The class containing the main method + */ + override protected def mainClass: String = "org.apache.kyuubi.engine.chat.ChatEngine" + + override protected val commands: Array[String] = { + val buffer = new ArrayBuffer[String]() + buffer += executable + + val memory = conf.get(ENGINE_CHAT_MEMORY) + buffer += s"-Xmx$memory" + + val javaOptions = conf.get(ENGINE_CHAT_JAVA_OPTIONS) + javaOptions.foreach(buffer += _) + + buffer += "-cp" + val classpathEntries = new util.LinkedHashSet[String] + mainResource.foreach(classpathEntries.add) + mainResource.foreach { path => + val parent = Paths.get(path).getParent + val chatDevDepDir = parent + .resolve(s"scala-$SCALA_COMPILE_VERSION") + .resolve("jars") + if (Files.exists(chatDevDepDir)) { + // add dev classpath + classpathEntries.add(s"$chatDevDepDir${File.separator}*") + } else { + // add prod classpath + classpathEntries.add(s"$parent${File.separator}*") + } + } + + val extraCp = conf.get(ENGINE_CHAT_EXTRA_CLASSPATH) + extraCp.foreach(classpathEntries.add) + buffer += classpathEntries.asScala.mkString(File.pathSeparator) + buffer += mainClass + + buffer += "--conf" + buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser" + + conf.getAll.foreach { case (k, v) => + buffer += "--conf" + buffer += s"$k=$v" + } + buffer.toArray + } + + override def toString: String = { + if (commands == null) { + super.toString() + } else { + Utils.redactCommandLineArgs(conf, commands).map { + case arg if arg.startsWith("-") || arg == mainClass => s"\\\n\t$arg" + case arg if arg.contains(ENGINE_CHAT_GPT_API_KEY.key) => + s"${ENGINE_CHAT_GPT_API_KEY.key}=$REDACTION_REPLACEMENT_TEXT" + case arg => arg + }.mkString(" ") + } + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala index b8146c4d2b6..f43adfbc216 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala @@ -21,15 +21,15 @@ import java.io.{File, FilenameFilter} import java.nio.file.{Files, Paths} import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.{ArrayBuffer, ListBuffer} import 
com.google.common.annotations.VisibleForTesting import org.apache.kyuubi._ -import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys} import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY -import org.apache.kyuubi.engine.{KyuubiApplicationManager, ProcBuilder} +import org.apache.kyuubi.engine.{ApplicationManagerInfo, KyuubiApplicationManager, ProcBuilder} import org.apache.kyuubi.engine.flink.FlinkProcessBuilder._ import org.apache.kyuubi.operation.log.OperationLog @@ -50,88 +50,178 @@ class FlinkProcessBuilder( val flinkHome: String = getEngineHome(shortName) + val flinkExecutable: String = { + Paths.get(flinkHome, "bin", FLINK_EXEC_FILE).toFile.getCanonicalPath + } + + // flink.execution.target are required in Kyuubi conf currently + val executionTarget: Option[String] = conf.getOption("flink.execution.target") + override protected def module: String = "kyuubi-flink-sql-engine" override protected def mainClass: String = "org.apache.kyuubi.engine.flink.FlinkSQLEngine" override def env: Map[String, String] = conf.getEnvs + - (FLINK_PROXY_USER_KEY -> proxyUser) + ("FLINK_CONF_DIR" -> conf.getEnvs.getOrElse( + "FLINK_CONF_DIR", + s"$flinkHome${File.separator}conf")) + + override def clusterManager(): Option[String] = { + executionTarget match { + case Some("yarn-application") => Some("yarn") + case _ => None + } + } + + override def appMgrInfo(): ApplicationManagerInfo = { + ApplicationManagerInfo(clusterManager()) + } override protected val commands: Array[String] = { KyuubiApplicationManager.tagApplication(engineRefId, shortName, clusterManager(), conf) - val buffer = new ArrayBuffer[String]() - buffer += executable - - val memory = conf.get(ENGINE_FLINK_MEMORY) - buffer += s"-Xmx$memory" - val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS) - if (javaOptions.isDefined) { - buffer += javaOptions.get - } + // unset engine credentials because Flink doesn't support them at the moment + conf.unset(KyuubiReservedKeys.KYUUBI_ENGINE_CREDENTIALS_KEY) + // flink.execution.target are required in Kyuubi conf currently + executionTarget match { + case Some("yarn-application") => + val buffer = new ArrayBuffer[String]() + buffer += flinkExecutable + buffer += "run-application" + + val flinkExtraJars = new ListBuffer[String] + // locate flink sql jars + val flinkSqlJars = Paths.get(flinkHome) + .resolve("opt") + .toFile + .listFiles(new FilenameFilter { + override def accept(dir: File, name: String): Boolean = { + name.toLowerCase.startsWith("flink-sql-client") || + name.toLowerCase.startsWith("flink-sql-gateway") + } + }).map(f => f.getAbsolutePath).sorted + flinkExtraJars ++= flinkSqlJars + + val userJars = conf.get(ENGINE_FLINK_APPLICATION_JARS) + userJars.foreach(jars => flinkExtraJars ++= jars.split(",")) + + val hiveConfDirOpt = env.get("HIVE_CONF_DIR") + hiveConfDirOpt.foreach { hiveConfDir => + val hiveConfFile = Paths.get(hiveConfDir).resolve("hive-site.xml") + if (!Files.exists(hiveConfFile)) { + throw new KyuubiException(s"The file $hiveConfFile does not exists. 
" + + s"Please put hive-site.xml when HIVE_CONF_DIR env $hiveConfDir is configured.") + } + flinkExtraJars += s"$hiveConfFile" + } - buffer += "-cp" - val classpathEntries = new java.util.LinkedHashSet[String] - // flink engine runtime jar - mainResource.foreach(classpathEntries.add) - // flink sql client jar - val flinkSqlClientPath = Paths.get(flinkHome) - .resolve("opt") - .toFile - .listFiles(new FilenameFilter { - override def accept(dir: File, name: String): Boolean = { - name.toLowerCase.startsWith("flink-sql-client") + buffer += "-t" + buffer += "yarn-application" + buffer += s"-Dyarn.ship-files=${flinkExtraJars.mkString(";")}" + buffer += s"-Dyarn.application.name=${conf.getOption(APP_KEY).get}" + buffer += s"-Dyarn.tags=${conf.getOption(YARN_TAG_KEY).get}" + buffer += "-Dcontainerized.master.env.FLINK_CONF_DIR=." + + hiveConfDirOpt.foreach { _ => + buffer += "-Dcontainerized.master.env.HIVE_CONF_DIR=." } - }).head.getAbsolutePath - classpathEntries.add(flinkSqlClientPath) - - // jars from flink lib - classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*") - - // classpath contains flink configurations, default to flink.home/conf - classpathEntries.add(env.getOrElse("FLINK_CONF_DIR", s"$flinkHome${File.separator}conf")) - // classpath contains hadoop configurations - env.get("HADOOP_CONF_DIR").foreach(classpathEntries.add) - env.get("YARN_CONF_DIR").foreach(classpathEntries.add) - env.get("HBASE_CONF_DIR").foreach(classpathEntries.add) - val hadoopCp = env.get(FLINK_HADOOP_CLASSPATH_KEY) - hadoopCp.foreach(classpathEntries.add) - val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH) - extraCp.foreach(classpathEntries.add) - if (hadoopCp.isEmpty && extraCp.isEmpty) { - warn(s"The conf of ${FLINK_HADOOP_CLASSPATH_KEY} and ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" + - s" is empty.") - debug("Detected development environment") - mainResource.foreach { path => - val devHadoopJars = Paths.get(path).getParent - .resolve(s"scala-$SCALA_COMPILE_VERSION") - .resolve("jars") - if (!Files.exists(devHadoopJars)) { - throw new KyuubiException(s"The path $devHadoopJars does not exists. 
" + - s"Please set ${FLINK_HADOOP_CLASSPATH_KEY} or ${ENGINE_FLINK_EXTRA_CLASSPATH.key} " + - s"for configuring location of hadoop client jars, etc") + + val customFlinkConf = conf.getAllWithPrefix("flink", "") + customFlinkConf.filter(_._1 != "app.name").foreach { case (k, v) => + buffer += s"-D$k=$v" } - classpathEntries.add(s"$devHadoopJars${File.separator}*") - } - } - buffer += classpathEntries.asScala.mkString(File.pathSeparator) - buffer += mainClass - buffer += "--conf" - buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser" + buffer += "-c" + buffer += s"$mainClass" + buffer += s"${mainResource.get}" + + buffer += "--conf" + buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser" + conf.getAll.foreach { case (k, v) => + if (k.startsWith("kyuubi.")) { + buffer += "--conf" + buffer += s"$k=$v" + } + } + + buffer.toArray + + case _ => + val buffer = new ArrayBuffer[String]() + buffer += executable + + val memory = conf.get(ENGINE_FLINK_MEMORY) + buffer += s"-Xmx$memory" + val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS) + if (javaOptions.isDefined) { + buffer += javaOptions.get + } + + buffer += "-cp" + val classpathEntries = new java.util.LinkedHashSet[String] + // flink engine runtime jar + mainResource.foreach(classpathEntries.add) + // flink sql jars + Paths.get(flinkHome) + .resolve("opt") + .toFile + .listFiles(new FilenameFilter { + override def accept(dir: File, name: String): Boolean = { + name.toLowerCase.startsWith("flink-sql-client") || + name.toLowerCase.startsWith("flink-sql-gateway") + } + }).sorted.foreach(jar => classpathEntries.add(jar.getAbsolutePath)) + + // jars from flink lib + classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*") + + // classpath contains flink configurations, default to flink.home/conf + classpathEntries.add(env.getOrElse("FLINK_CONF_DIR", s"$flinkHome${File.separator}conf")) + // classpath contains hadoop configurations + env.get("HADOOP_CONF_DIR").foreach(classpathEntries.add) + env.get("YARN_CONF_DIR").foreach(classpathEntries.add) + env.get("HBASE_CONF_DIR").foreach(classpathEntries.add) + env.get("HIVE_CONF_DIR").foreach(classpathEntries.add) + val hadoopCp = env.get(FLINK_HADOOP_CLASSPATH_KEY) + hadoopCp.foreach(classpathEntries.add) + val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH) + extraCp.foreach(classpathEntries.add) + if (hadoopCp.isEmpty && extraCp.isEmpty) { + warn(s"The conf of ${FLINK_HADOOP_CLASSPATH_KEY} and " + + s"${ENGINE_FLINK_EXTRA_CLASSPATH.key} is empty.") + debug("Detected development environment.") + mainResource.foreach { path => + val devHadoopJars = Paths.get(path).getParent + .resolve(s"scala-$SCALA_COMPILE_VERSION") + .resolve("jars") + if (!Files.exists(devHadoopJars)) { + throw new KyuubiException(s"The path $devHadoopJars does not exists. 
" + + s"Please set ${FLINK_HADOOP_CLASSPATH_KEY} or ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" + + s" for configuring location of hadoop client jars, etc.") + } + classpathEntries.add(s"$devHadoopJars${File.separator}*") + } + } + buffer += classpathEntries.asScala.mkString(File.pathSeparator) + buffer += mainClass - for ((k, v) <- conf.getAll) { - buffer += "--conf" - buffer += s"$k=$v" + buffer += "--conf" + buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser" + + conf.getAll.foreach { case (k, v) => + buffer += "--conf" + buffer += s"$k=$v" + } + buffer.toArray } - buffer.toArray } override def shortName: String = "flink" } object FlinkProcessBuilder { - final val APP_KEY = "yarn.application.name" - final val TAG_KEY = "yarn.tags" + final val FLINK_EXEC_FILE = "flink" + final val APP_KEY = "flink.app.name" + final val YARN_TAG_KEY = "yarn.tags" final val FLINK_HADOOP_CLASSPATH_KEY = "FLINK_HADOOP_CLASSPATH" final val FLINK_PROXY_USER_KEY = "HADOOP_PROXY_USER" } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/hive/HiveProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/hive/HiveProcessBuilder.scala index e86597c5cc4..61fe55887ea 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/hive/HiveProcessBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/hive/HiveProcessBuilder.scala @@ -29,7 +29,7 @@ import com.google.common.annotations.VisibleForTesting import org.apache.kyuubi._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.{ENGINE_HIVE_EXTRA_CLASSPATH, ENGINE_HIVE_JAVA_OPTIONS, ENGINE_HIVE_MEMORY} -import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY +import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_ENGINE_ID, KYUUBI_SESSION_USER_KEY} import org.apache.kyuubi.engine.{KyuubiApplicationManager, ProcBuilder} import org.apache.kyuubi.engine.hive.HiveProcessBuilder._ import org.apache.kyuubi.operation.log.OperationLog @@ -106,6 +106,8 @@ class HiveProcessBuilder( buffer += "--conf" buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser" + buffer += "--conf" + buffer += s"$KYUUBI_ENGINE_ID=$engineRefId" for ((k, v) <- conf.getAll) { buffer += "--conf" @@ -121,4 +123,5 @@ class HiveProcessBuilder( object HiveProcessBuilder { final val HIVE_HADOOP_CLASSPATH_KEY = "HIVE_HADOOP_CLASSPATH" + final val HIVE_ENGINE_NAME = "hive.engine.name" } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkBatchProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkBatchProcessBuilder.scala index 98f9ea5a335..ef159bb93ad 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkBatchProcessBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkBatchProcessBuilder.scala @@ -36,7 +36,7 @@ class SparkBatchProcessBuilder( extends SparkProcessBuilder(proxyUser, conf, batchId, extraEngineLog) { import SparkProcessBuilder._ - override protected val commands: Array[String] = { + override protected lazy val commands: Array[String] = { val buffer = new ArrayBuffer[String]() buffer += executable Option(mainClass).foreach { cla => @@ -51,14 +51,15 @@ class SparkBatchProcessBuilder( // tag batch application KyuubiApplicationManager.tagApplication(batchId, "spark", clusterManager(), batchKyuubiConf) - (batchKyuubiConf.getAll ++ sparkAppNameConf()).foreach { case (k, v) => + (batchKyuubiConf.getAll ++ + sparkAppNameConf() ++ + engineLogPathConf() ++ + appendPodNameConf(batchConf)).foreach { 
case (k, v) => buffer += CONF buffer += s"${convertConfigKey(k)}=$v" } - setSparkUserName(proxyUser, buffer) - buffer += PROXY_USER - buffer += proxyUser + setupKerberos(buffer) assert(mainResource.isDefined) buffer += mainResource.get @@ -77,6 +78,14 @@ class SparkBatchProcessBuilder( override protected def module: String = "kyuubi-spark-batch-submit" override def clusterManager(): Option[String] = { - batchConf.get(MASTER_KEY).orElse(defaultMaster) + batchConf.get(MASTER_KEY).orElse(super.clusterManager()) + } + + override def kubernetesContext(): Option[String] = { + batchConf.get(KUBERNETES_CONTEXT_KEY).orElse(super.kubernetesContext()) + } + + override def kubernetesNamespace(): Option[String] = { + batchConf.get(KUBERNETES_NAMESPACE_KEY).orElse(super.kubernetesNamespace()) } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilder.scala index 874a36c0016..351eddb7567 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilder.scala @@ -19,7 +19,9 @@ package org.apache.kyuubi.engine.spark import java.io.{File, IOException} import java.nio.file.Paths +import java.util.Locale +import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import com.google.common.annotations.VisibleForTesting @@ -27,11 +29,13 @@ import org.apache.hadoop.security.UserGroupInformation import org.apache.kyuubi._ import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.engine.{KyuubiApplicationManager, ProcBuilder} +import org.apache.kyuubi.engine.{ApplicationManagerInfo, KyuubiApplicationManager, ProcBuilder} import org.apache.kyuubi.engine.KubernetesApplicationOperation.{KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT} +import org.apache.kyuubi.engine.ProcBuilder.KYUUBI_ENGINE_LOG_PATH_KEY import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.ha.client.AuthTypes import org.apache.kyuubi.operation.log.OperationLog +import org.apache.kyuubi.util.KubernetesUtils import org.apache.kyuubi.util.Validator class SparkProcessBuilder( @@ -98,7 +102,7 @@ class SparkProcessBuilder( } } - override protected val commands: Array[String] = { + override protected lazy val commands: Array[String] = { // complete `spark.master` if absent on kubernetes completeMasterUrl(conf) @@ -115,12 +119,22 @@ class SparkProcessBuilder( == AuthTypes.KERBEROS) { allConf = allConf ++ zkAuthKeytabFileConf(allConf) } - - allConf.foreach { case (k, v) => + // pass spark engine log path to spark conf + (allConf ++ engineLogPathConf ++ appendPodNameConf(allConf)).foreach { case (k, v) => buffer += CONF buffer += s"${convertConfigKey(k)}=$v" } + setupKerberos(buffer) + + mainResource.foreach { r => buffer += r } + + buffer.toArray + } + + override protected def module: String = "kyuubi-spark-sql-engine" + + protected def setupKerberos(buffer: ArrayBuffer[String]): Unit = { // if the keytab is specified, PROXY_USER is not supported tryKeytab() match { case None => @@ -130,14 +144,8 @@ class SparkProcessBuilder( case Some(name) => setSparkUserName(name, buffer) } - - mainResource.foreach { r => buffer += r } - - buffer.toArray } - override protected def module: String = "kyuubi-spark-sql-engine" - private def tryKeytab(): Option[String] = { val principal = conf.getOption(PRINCIPAL) val keytab = conf.getOption(KEYTAB) @@ -179,26 +187,69 @@ class 
SparkProcessBuilder( override def shortName: String = "spark" - protected lazy val defaultMaster: Option[String] = { + protected lazy val defaultsConf: Map[String, String] = { val confDir = env.getOrElse(SPARK_CONF_DIR, s"$sparkHome${File.separator}conf") - val defaults = - try { - val confFile = new File(s"$confDir${File.separator}$SPARK_CONF_FILE_NAME") - if (confFile.exists()) { - Utils.getPropertiesFromFile(Some(confFile)) - } else { - Map.empty[String, String] + try { + val confFile = new File(s"$confDir${File.separator}$SPARK_CONF_FILE_NAME") + if (confFile.exists()) { + Utils.getPropertiesFromFile(Some(confFile)) + } else { + Map.empty[String, String] + } + } catch { + case _: Exception => + warn(s"Failed to load spark configurations from $confDir") + Map.empty[String, String] + } + } + + override def appMgrInfo(): ApplicationManagerInfo = { + ApplicationManagerInfo( + clusterManager(), + kubernetesContext(), + kubernetesNamespace()) + } + + def appendPodNameConf(conf: Map[String, String]): Map[String, String] = { + val appName = conf.getOrElse(APP_KEY, "spark") + val map = mutable.Map.newBuilder[String, String] + if (clusterManager().exists(cm => cm.toLowerCase(Locale.ROOT).startsWith("k8s"))) { + if (!conf.contains(KUBERNETES_EXECUTOR_POD_NAME_PREFIX)) { + val prefix = KubernetesUtils.generateExecutorPodNamePrefix(appName, engineRefId) + map += (KUBERNETES_EXECUTOR_POD_NAME_PREFIX -> prefix) + } + if (deployMode().exists(_.toLowerCase(Locale.ROOT) == "cluster")) { + if (!conf.contains(KUBERNETES_DRIVER_POD_NAME)) { + val name = KubernetesUtils.generateDriverPodName(appName, engineRefId) + map += (KUBERNETES_DRIVER_POD_NAME -> name) } - } catch { - case _: Exception => - warn(s"Failed to load spark configurations from $confDir") - Map.empty[String, String] } - defaults.get(MASTER_KEY) + } + map.result().toMap } override def clusterManager(): Option[String] = { - conf.getOption(MASTER_KEY).orElse(defaultMaster) + conf.getOption(MASTER_KEY).orElse(defaultsConf.get(MASTER_KEY)) + } + + def deployMode(): Option[String] = { + conf.getOption(DEPLOY_MODE_KEY).orElse(defaultsConf.get(DEPLOY_MODE_KEY)) + } + + override def isClusterMode(): Boolean = { + clusterManager().map(_.toLowerCase(Locale.ROOT)) match { + case Some(m) if m.startsWith("yarn") || m.startsWith("k8s") => + deployMode().exists(_.toLowerCase(Locale.ROOT) == "cluster") + case _ => false + } + } + + def kubernetesContext(): Option[String] = { + conf.getOption(KUBERNETES_CONTEXT_KEY).orElse(defaultsConf.get(KUBERNETES_CONTEXT_KEY)) + } + + def kubernetesNamespace(): Option[String] = { + conf.getOption(KUBERNETES_NAMESPACE_KEY).orElse(defaultsConf.get(KUBERNETES_NAMESPACE_KEY)) } override def validateConf: Unit = Validator.validateConf(conf) @@ -214,12 +265,21 @@ class SparkProcessBuilder( } } } + + private[spark] def engineLogPathConf(): Map[String, String] = { + Map(KYUUBI_ENGINE_LOG_PATH_KEY -> engineLog.getAbsolutePath) + } } object SparkProcessBuilder { final val APP_KEY = "spark.app.name" final val TAG_KEY = "spark.yarn.tags" final val MASTER_KEY = "spark.master" + final val DEPLOY_MODE_KEY = "spark.submit.deployMode" + final val KUBERNETES_CONTEXT_KEY = "spark.kubernetes.context" + final val KUBERNETES_NAMESPACE_KEY = "spark.kubernetes.namespace" + final val KUBERNETES_DRIVER_POD_NAME = "spark.kubernetes.driver.pod.name" + final val KUBERNETES_EXECUTOR_POD_NAME_PREFIX = "spark.kubernetes.executor.podNamePrefix" final val INTERNAL_RESOURCE = "spark-internal" /** diff --git 
a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/trino/TrinoProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/trino/TrinoProcessBuilder.scala index 7b68e464aa9..041219dd0fb 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/trino/TrinoProcessBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/trino/TrinoProcessBuilder.scala @@ -27,6 +27,7 @@ import scala.collection.mutable.ArrayBuffer import com.google.common.annotations.VisibleForTesting import org.apache.kyuubi.{Logging, SCALA_COMPILE_VERSION, Utils} +import org.apache.kyuubi.Utils.REDACTION_REPLACEMENT_TEXT import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_USER_KEY @@ -108,5 +109,19 @@ class TrinoProcessBuilder( override def shortName: String = "trino" - override def toString: String = Utils.redactCommandLineArgs(conf, commands).mkString("\n") + override def toString: String = { + if (commands == null) { + super.toString() + } else { + Utils.redactCommandLineArgs(conf, commands).map { + case arg if arg.contains(ENGINE_TRINO_CONNECTION_PASSWORD.key) => + s"${ENGINE_TRINO_CONNECTION_PASSWORD.key}=$REDACTION_REPLACEMENT_TEXT" + case arg if arg.contains(ENGINE_TRINO_CONNECTION_KEYSTORE_PASSWORD.key) => + s"${ENGINE_TRINO_CONNECTION_KEYSTORE_PASSWORD.key}=$REDACTION_REPLACEMENT_TEXT" + case arg if arg.contains(ENGINE_TRINO_CONNECTION_TRUSTSTORE_PASSWORD.key) => + s"${ENGINE_TRINO_CONNECTION_TRUSTSTORE_PASSWORD.key}=$REDACTION_REPLACEMENT_TEXT" + case arg => arg + }.mkString("\n") + } + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/KyuubiOperationEvent.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/KyuubiOperationEvent.scala index 74a3a3fad39..2a103213e5a 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/KyuubiOperationEvent.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/KyuubiOperationEvent.scala @@ -42,6 +42,8 @@ import org.apache.kyuubi.session.KyuubiSession * @param sessionId the identifier of the parent session * @param sessionUser the authenticated client user * @param sessionType the type of the parent session + * @param kyuubiInstance the parent session connection url + * @param metrics the operation metrics */ case class KyuubiOperationEvent private ( statementId: String, @@ -56,7 +58,9 @@ case class KyuubiOperationEvent private ( exception: Option[Throwable], sessionId: String, sessionUser: String, - sessionType: String) extends KyuubiEvent { + sessionType: String, + kyuubiInstance: String, + metrics: Map[String, String]) extends KyuubiEvent { // operation events are partitioned by the date when the corresponding operations are // created. 
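// Illustrative sketch (hedged, not taken from this patch): the Trino and Chat
// builders above override toString so that secret-bearing arguments are masked
// before the command preview is logged. A standalone rendering of that masking
// idea, with placeholder key names rather than Kyuubi's real config constants:
object CommandRedactionSketch {
  private val sensitiveKeys = Set(
    "engine.connection.password",           // placeholder key, for illustration only
    "engine.connection.keystore.password")  // placeholder key, for illustration only
  private val replacement = "*********(redacted)"

  // mask the value of any "key=value" argument whose key is considered sensitive
  def redact(args: Seq[String]): Seq[String] = args.map { arg =>
    sensitiveKeys.find(k => arg.startsWith(s"$k=")) match {
      case Some(k) => s"$k=$replacement"
      case None => arg
    }
  }
}
// e.g. redact(Seq("engine.connection.password=secret", "--verbose"))
//      == Seq("engine.connection.password=*********(redacted)", "--verbose")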
@@ -85,6 +89,8 @@ object KyuubiOperationEvent { status.exception, session.handle.identifier.toString, session.user, - session.sessionType.toString) + session.sessionType.toString, + session.connectionUrl, + operation.metrics) } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala index 4ddee48ddfd..ca6c776ac8c 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala @@ -19,8 +19,9 @@ package org.apache.kyuubi.events import java.net.InetAddress import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.config.KyuubiConf.{SERVER_EVENT_JSON_LOG_PATH, SERVER_EVENT_LOGGERS} -import org.apache.kyuubi.events.handler.{EventHandler, ServerJsonLoggingEventHandler} +import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.events.handler.{EventHandler, ServerJsonLoggingEventHandler, ServerKafkaLoggingEventHandler} +import org.apache.kyuubi.events.handler.ServerKafkaLoggingEventHandler.KAFKA_SERVER_EVENT_HANDLER_PREFIX import org.apache.kyuubi.util.KyuubiHadoopUtils object ServerEventHandlerRegister extends EventHandlerRegister { @@ -36,6 +37,22 @@ object ServerEventHandlerRegister extends EventHandlerRegister { kyuubiConf) } + override def createKafkaEventHandler(kyuubiConf: KyuubiConf): EventHandler[KyuubiEvent] = { + val topic = kyuubiConf.get(SERVER_EVENT_KAFKA_TOPIC).getOrElse { + throw new IllegalArgumentException(s"${SERVER_EVENT_KAFKA_TOPIC.key} must be configured") + } + val closeTimeoutInMs = kyuubiConf.get(SERVER_EVENT_KAFKA_CLOSE_TIMEOUT) + val kafkaEventHandlerProducerConf = + kyuubiConf.getAllWithPrefix(KAFKA_SERVER_EVENT_HANDLER_PREFIX, "") + .filterKeys( + !List(SERVER_EVENT_KAFKA_TOPIC, SERVER_EVENT_KAFKA_CLOSE_TIMEOUT).map(_.key).contains(_)) + ServerKafkaLoggingEventHandler( + topic, + kafkaEventHandlerProducerConf, + kyuubiConf, + closeTimeoutInMs) + } + override protected def getLoggers(conf: KyuubiConf): Seq[String] = { conf.get(SERVER_EVENT_LOGGERS) } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/datalake/HudiOperationSuite.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala similarity index 62% rename from kyuubi-server/src/test/scala/org/apache/kyuubi/operation/datalake/HudiOperationSuite.scala rename to kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala index 0c507504dea..08f8b0d7944 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/datalake/HudiOperationSuite.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala @@ -15,20 +15,17 @@ * limitations under the License. 
*/ -package org.apache.kyuubi.operation.datalake +package org.apache.kyuubi.events.handler -import org.apache.kyuubi.WithKyuubiServer import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.operation.HudiMetadataTests -import org.apache.kyuubi.tags.HudiTest -@HudiTest -class HudiOperationSuite extends WithKyuubiServer with HudiMetadataTests { - override protected val conf: KyuubiConf = { - val kyuubiConf = KyuubiConf().set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 20000L) - extraConfigs.foreach { case (k, v) => kyuubiConf.set(k, v) } - kyuubiConf - } +case class ServerKafkaLoggingEventHandler( + topic: String, + producerConf: Iterable[(String, String)], + kyuubiConf: KyuubiConf, + closeTimeoutInMs: Long) + extends KafkaLoggingEventHandler(topic, producerConf, kyuubiConf, closeTimeoutInMs) - override def jdbcUrl: String = getJdbcUrl +object ServerKafkaLoggingEventHandler { + val KAFKA_SERVER_EVENT_HANDLER_PREFIX = "kyuubi.backend.server.event.kafka" } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala index f061d977da7..779dc48ae6a 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala @@ -26,7 +26,7 @@ import com.codahale.metrics.MetricRegistry import com.google.common.annotations.VisibleForTesting import org.apache.hive.service.rpc.thrift._ -import org.apache.kyuubi.{KyuubiException, KyuubiSQLException} +import org.apache.kyuubi.{KyuubiException, KyuubiSQLException, Utils} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.engine.{ApplicationInfo, ApplicationState, KillResponse, ProcBuilder} import org.apache.kyuubi.engine.spark.SparkBatchProcessBuilder @@ -36,7 +36,7 @@ import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.operation.OperationState.{isTerminal, CANCELED, OperationState, RUNNING} import org.apache.kyuubi.operation.log.OperationLog import org.apache.kyuubi.server.metadata.api.Metadata -import org.apache.kyuubi.session.KyuubiBatchSessionImpl +import org.apache.kyuubi.session.KyuubiBatchSession /** * The state of batch operation is special. In general, the lifecycle of state is: @@ -51,14 +51,14 @@ import org.apache.kyuubi.session.KyuubiBatchSessionImpl * user close the batch session that means the final status is CANCELED. 
*/ class BatchJobSubmission( - session: KyuubiBatchSessionImpl, + session: KyuubiBatchSession, val batchType: String, val batchName: String, resource: String, className: String, batchConf: Map[String, String], batchArgs: Seq[String], - recoveryMetadata: Option[Metadata]) + metadata: Option[Metadata]) extends KyuubiApplicationOperation(session) { import BatchJobSubmission._ @@ -71,16 +71,16 @@ class BatchJobSubmission( private[kyuubi] val batchId: String = session.handle.identifier.toString @volatile private var _applicationInfo: Option[ApplicationInfo] = None - def getOrFetchCurrentApplicationInfo: Option[ApplicationInfo] = _applicationInfo match { - case Some(_) => _applicationInfo - case None => currentApplicationInfo - } + def getApplicationInfo: Option[ApplicationInfo] = _applicationInfo private var killMessage: KillResponse = (false, "UNKNOWN") def getKillMessage: KillResponse = killMessage - @volatile private var _appStartTime = recoveryMetadata.map(_.engineOpenTime).getOrElse(0L) + @volatile private var _appStartTime = metadata.map(_.engineOpenTime).getOrElse(0L) def appStartTime: Long = _appStartTime + def appStarted: Boolean = _appStartTime > 0 + + private lazy val _submitTime = if (appStarted) _appStartTime else System.currentTimeMillis @VisibleForTesting private[kyuubi] val builder: ProcBuilder = { @@ -102,12 +102,17 @@ class BatchJobSubmission( } } - override protected def currentApplicationInfo: Option[ApplicationInfo] = { - if (isTerminal(state) && _applicationInfo.nonEmpty) return _applicationInfo - // only the ApplicationInfo with non-empty id is valid for the operation + override def currentApplicationInfo(): Option[ApplicationInfo] = { + if (isTerminal(state) && _applicationInfo.map(_.state).exists(ApplicationState.isTerminated)) { + return _applicationInfo + } val applicationInfo = - applicationManager.getApplicationInfo(builder.clusterManager(), batchId).filter(_.id != null) - applicationInfo.foreach { _ => + applicationManager.getApplicationInfo( + builder.appMgrInfo(), + batchId, + Some(session.user), + Some(_submitTime)) + applicationId(applicationInfo).foreach { _ => if (_appStartTime <= 0) { _appStartTime = System.currentTimeMillis() } @@ -115,8 +120,12 @@ class BatchJobSubmission( applicationInfo } + private def applicationId(applicationInfo: Option[ApplicationInfo]): Option[String] = { + applicationInfo.filter(_.id != null).map(_.id).orElse(None) + } + private[kyuubi] def killBatchApplication(): KillResponse = { - applicationManager.killApplication(builder.clusterManager(), batchId) + applicationManager.killApplication(builder.appMgrInfo(), batchId, Some(session.user)) } private val applicationCheckInterval = @@ -124,31 +133,26 @@ class BatchJobSubmission( private val applicationStarvationTimeout = session.sessionConf.get(KyuubiConf.BATCH_APPLICATION_STARVATION_TIMEOUT) + private val applicationStartupDestroyTimeout = + session.sessionConf.get(KyuubiConf.SESSION_ENGINE_STARTUP_DESTROY_TIMEOUT) + private def updateBatchMetadata(): Unit = { - val endTime = - if (isTerminalState(state)) { - lastAccessTime - } else { - 0L - } + val endTime = if (isTerminalState(state)) lastAccessTime else 0L - if (isTerminalState(state)) { - if (_applicationInfo.isEmpty) { - _applicationInfo = - Option(ApplicationInfo(id = null, name = null, state = ApplicationState.NOT_FOUND)) - } + if (isTerminalState(state) && _applicationInfo.isEmpty) { + _applicationInfo = Some(ApplicationInfo.NOT_FOUND) } - _applicationInfo.foreach { status => + _applicationInfo.foreach { appInfo => val 
metadataToUpdate = Metadata( identifier = batchId, state = state.toString, engineOpenTime = appStartTime, - engineId = status.id, - engineName = status.name, - engineUrl = status.url.orNull, - engineState = status.state.toString, - engineError = status.error, + engineId = appInfo.id, + engineName = appInfo.name, + engineUrl = appInfo.url.orNull, + engineState = appInfo.state.toString, + engineError = appInfo.error, endTime = endTime) session.sessionManager.updateMetadata(metadataToUpdate) } @@ -157,11 +161,11 @@ class BatchJobSubmission( override def getOperationLog: Option[OperationLog] = Option(_operationLog) // we can not set to other state if it is canceled - private def setStateIfNotCanceled(newState: OperationState): Unit = state.synchronized { + private def setStateIfNotCanceled(newState: OperationState): Unit = withLockRequired { if (state != CANCELED) { setState(newState) - _applicationInfo.filter(_.id != null).foreach { ai => - session.getSessionEvent.foreach(_.engineId = ai.id) + applicationId(_applicationInfo).foreach { appId => + session.getSessionEvent.foreach(_.engineId = appId) } if (newState == RUNNING) { session.onEngineOpened() @@ -182,31 +186,27 @@ class BatchJobSubmission( override protected def runInternal(): Unit = session.handleSessionException { val asyncOperation: Runnable = () => { try { - if (recoveryMetadata.exists(_.peerInstanceClosed)) { - setState(OperationState.CANCELED) - } else { - // If it is in recovery mode, only re-submit batch job if previous state is PENDING and - // fail to fetch the status including appId from resource manager. Otherwise, monitor the - // submitted batch application. - recoveryMetadata.map { metadata => - if (metadata.state == OperationState.PENDING.toString) { - _applicationInfo = currentApplicationInfo - _applicationInfo.map(_.id) match { - case Some(null) => - submitAndMonitorBatchJob() - case Some(appId) => - monitorBatchJob(appId) - case None => - submitAndMonitorBatchJob() - } - } else { - monitorBatchJob(metadata.engineId) + metadata match { + case Some(metadata) if metadata.peerInstanceClosed => + setState(OperationState.CANCELED) + case Some(metadata) if metadata.state == OperationState.PENDING.toString => + // case 1: new batch job created using batch impl v2 + // case 2: batch job from recovery, do submission only when previous state is + // PENDING and fail to fetch the status by appId from resource manager, which + // is similar with case 1; otherwise, monitor the submitted batch application. 
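// Illustrative sketch (hedged, not taken from this patch): the recovery branch
// above chooses between cancelling, re-submitting, and monitoring based on the
// persisted batch metadata. A simplified, self-contained model of that decision;
// the types and state strings below are stand-ins, not Kyuubi's actual API:
object BatchRecoverySketch {
  sealed trait Action
  case object Cancel extends Action
  case object Submit extends Action
  final case class Monitor(appId: String) extends Action

  final case class Meta(state: String, peerInstanceClosed: Boolean, engineId: String)

  def decide(meta: Option[Meta], appIdFromResourceManager: Option[String]): Action = meta match {
    case Some(m) if m.peerInstanceClosed => Cancel        // peer instance already closed this batch
    case Some(m) if m.state == "PENDING" =>               // never left PENDING: submit unless the RM already knows it
      appIdFromResourceManager.map(Monitor(_)).getOrElse(Submit)
    case Some(m) => Monitor(m.engineId)                   // previously submitted: keep monitoring
    case None => Submit                                   // brand-new batch, no metadata yet
  }
}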
+ _applicationInfo = currentApplicationInfo() + applicationId(_applicationInfo) match { + case None => submitAndMonitorBatchJob() + case Some(appId) => monitorBatchJob(appId) } - }.getOrElse { + case Some(metadata) => + // batch job from recovery which was submitted + monitorBatchJob(metadata.engineId) + case None => + // brand-new job created using batch impl v1 submitAndMonitorBatchJob() - } - setStateIfNotCanceled(OperationState.FINISHED) } + setStateIfNotCanceled(OperationState.FINISHED) } catch { onError() } finally { @@ -232,10 +232,11 @@ class BatchJobSubmission( try { info(s"Submitting $batchType batch[$batchId] job:\n$builder") val process = builder.start - _applicationInfo = currentApplicationInfo while (!applicationFailed(_applicationInfo) && process.isAlive) { + updateApplicationInfoMetadataIfNeeded() if (!appStatusFirstUpdated) { - if (_applicationInfo.isDefined) { + // only the ApplicationInfo with non-empty id indicates that batch is RUNNING + if (applicationId(_applicationInfo).isDefined) { setStateIfNotCanceled(OperationState.RUNNING) updateBatchMetadata() appStatusFirstUpdated = true @@ -249,25 +250,41 @@ class BatchJobSubmission( } } process.waitFor(applicationCheckInterval, TimeUnit.MILLISECONDS) - _applicationInfo = currentApplicationInfo } if (applicationFailed(_applicationInfo)) { - process.destroyForcibly() - throw new RuntimeException(s"Batch job failed: ${_applicationInfo}") + Utils.terminateProcess(process, applicationStartupDestroyTimeout) + throw new KyuubiException(s"Batch job failed: ${_applicationInfo}") } else { process.waitFor() if (process.exitValue() != 0) { throw new KyuubiException(s"Process exit with value ${process.exitValue()}") } - Option(_applicationInfo.map(_.id)).foreach { + while (!appStarted && applicationId(_applicationInfo).isEmpty && + !applicationTerminated(_applicationInfo)) { + Thread.sleep(applicationCheckInterval) + updateApplicationInfoMetadataIfNeeded() + } + + applicationId(_applicationInfo) match { case Some(appId) => monitorBatchJob(appId) - case _ => + case None if !appStarted => + throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") + case None => } } } finally { - builder.close() + val waitCompletion = batchConf.get(KyuubiConf.SESSION_ENGINE_STARTUP_WAIT_COMPLETION.key) + .map(_.toBoolean).getOrElse( + session.sessionConf.get(KyuubiConf.SESSION_ENGINE_STARTUP_WAIT_COMPLETION)) + val destroyProcess = !waitCompletion && builder.isClusterMode() + if (destroyProcess) { + info("Destroy the builder process because waitCompletion is false" + + " and the engine is running in cluster mode.") + } + builder.close(destroyProcess) + updateApplicationInfoMetadataIfNeeded() cleanupUploadedResourceIfNeeded() } } @@ -275,29 +292,37 @@ class BatchJobSubmission( private def monitorBatchJob(appId: String): Unit = { info(s"Monitoring submitted $batchType batch[$batchId] job: $appId") if (_applicationInfo.isEmpty) { - _applicationInfo = currentApplicationInfo + _applicationInfo = currentApplicationInfo() } if (state == OperationState.PENDING) { setStateIfNotCanceled(OperationState.RUNNING) } if (_applicationInfo.isEmpty) { info(s"The $batchType batch[$batchId] job: $appId not found, assume that it has finished.") - } else if (applicationFailed(_applicationInfo)) { - throw new RuntimeException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") - } else { - updateBatchMetadata() - // TODO: add limit for max batch job submission lifetime - while (_applicationInfo.isDefined && 
!applicationTerminated(_applicationInfo)) { - Thread.sleep(applicationCheckInterval) - val newApplicationStatus = currentApplicationInfo - if (newApplicationStatus.map(_.state) != _applicationInfo.map(_.state)) { - _applicationInfo = newApplicationStatus - info(s"Batch report for $batchId, ${_applicationInfo}") - } - } + return + } + if (applicationFailed(_applicationInfo)) { + throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") + } + updateBatchMetadata() + // TODO: add limit for max batch job submission lifetime + while (_applicationInfo.isDefined && !applicationTerminated(_applicationInfo)) { + Thread.sleep(applicationCheckInterval) + updateApplicationInfoMetadataIfNeeded() + } + if (applicationFailed(_applicationInfo)) { + throw new KyuubiException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") + } + } - if (applicationFailed(_applicationInfo)) { - throw new RuntimeException(s"$batchType batch[$batchId] job failed: ${_applicationInfo}") + private def updateApplicationInfoMetadataIfNeeded(): Unit = { + if (applicationId(_applicationInfo).isEmpty || + !_applicationInfo.map(_.state).exists(ApplicationState.isTerminated)) { + val newApplicationStatus = currentApplicationInfo() + if (newApplicationStatus.map(_.state) != _applicationInfo.map(_.state)) { + _applicationInfo = newApplicationStatus + updateBatchMetadata() + info(s"Batch report for $batchId, ${_applicationInfo}") } } } @@ -312,7 +337,7 @@ class BatchJobSubmission( } } - override def close(): Unit = state.synchronized { + override def close(): Unit = withLockRequired { if (!isClosedOrCanceled) { try { getOperationLog.foreach(_.close()) @@ -325,14 +350,14 @@ class BatchJobSubmission( // fast fail if (isTerminalState(state)) { killMessage = (false, s"batch $batchId is already terminal so can not kill it.") - builder.close() + builder.close(true) cleanupUploadedResourceIfNeeded() return } try { killMessage = killBatchApplication() - builder.close() + builder.close(true) cleanupUploadedResourceIfNeeded() } finally { if (state == OperationState.INITIALIZED) { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/ExecutedCommandExec.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/ExecutedCommandExec.scala index 98065b8cbaf..70b727e5e67 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/ExecutedCommandExec.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/ExecutedCommandExec.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.operation -import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet} +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TGetResultSetMetadataResp} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.operation.log.OperationLog @@ -67,11 +67,17 @@ class ExecutedCommandExec( if (!shouldRunAsync) getBackgroundHandle.get() } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) - command.getNextRowSet(order, rowSetSize, getProtocolVersion) + val rowSet = command.getNextRowSet(order, rowSetSize, getProtocolVersion) + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(rowSet) + resp.setHasMoreRows(false) + resp } override def getResultSetMetadata: TGetResultSetMetadataResp = { diff --git 
a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiApplicationOperation.scala index b864f0101ef..93929c59cce 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiApplicationOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiApplicationOperation.scala @@ -22,7 +22,7 @@ import java.util.{ArrayList => JArrayList} import scala.collection.JavaConverters._ -import org.apache.hive.service.rpc.thrift.{TColumn, TColumnDesc, TGetResultSetMetadataResp, TPrimitiveTypeEntry, TRow, TRowSet, TStringColumn, TTableSchema, TTypeDesc, TTypeEntry, TTypeId} +import org.apache.hive.service.rpc.thrift.{TColumn, TColumnDesc, TFetchResultsResp, TGetResultSetMetadataResp, TPrimitiveTypeEntry, TRow, TRowSet, TStringColumn, TTableSchema, TTypeDesc, TTypeEntry, TTypeId} import org.apache.kyuubi.engine.ApplicationInfo import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation @@ -31,7 +31,11 @@ import org.apache.kyuubi.util.ThriftUtils abstract class KyuubiApplicationOperation(session: Session) extends KyuubiOperation(session) { - protected def currentApplicationInfo: Option[ApplicationInfo] + protected def currentApplicationInfo(): Option[ApplicationInfo] + + protected def applicationInfoMap: Option[Map[String, String]] = { + currentApplicationInfo().map(_.toMap) + } override def getResultSetMetadata: TGetResultSetMetadataResp = { val schema = new TTableSchema() @@ -50,8 +54,11 @@ abstract class KyuubiApplicationOperation(session: Session) extends KyuubiOperat resp } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { - currentApplicationInfo.map(_.toMap).map { state => + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { + val resp = new TFetchResultsResp(OK_STATUS) + val rowSet = applicationInfoMap.map { state => val tRow = new TRowSet(0, new JArrayList[TRow](state.size)) Seq(state.keys, state.values.map(Option(_).getOrElse(""))).map(_.toSeq.asJava).foreach { col => @@ -60,5 +67,8 @@ abstract class KyuubiApplicationOperation(session: Session) extends KyuubiOperat } tRow }.getOrElse(ThriftUtils.EMPTY_ROW_SET) + resp.setResults(rowSet) + resp.setHasMoreRows(false) + resp } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperation.scala index 106a11e4b25..83e19cb6579 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperation.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperation.scala @@ -53,13 +53,28 @@ abstract class KyuubiOperation(session: Session) extends AbstractOperation(sessi def remoteOpHandle(): TOperationHandle = _remoteOpHandle + @volatile protected var _fetchLogCount = 0L + @volatile protected var _fetchResultsCount = 0L + + protected[kyuubi] def increaseFetchLogCount(count: Int): Unit = { + _fetchLogCount += count + } + + protected[kyuubi] def increaseFetchResultsCount(count: Int): Unit = { + _fetchResultsCount += count + } + + def metrics: Map[String, String] = Map( + "fetchLogCount" -> _fetchLogCount.toString, + "fetchResultsCount" -> _fetchResultsCount.toString) + protected def verifyTStatus(tStatus: TStatus): Unit = { ThriftUtils.verifyTStatus(tStatus) } protected def onError(action: String = "operating"): PartialFunction[Throwable, Unit] = { case e: Throwable => - 
state.synchronized { + withLockRequired { if (isTerminalState(state)) { warn(s"Ignore exception in terminal state with $statementId", e) } else { @@ -101,14 +116,14 @@ abstract class KyuubiOperation(session: Session) extends AbstractOperation(sessi } override protected def afterRun(): Unit = { - state.synchronized { + withLockRequired { if (!isTerminalState(state)) { setState(OperationState.FINISHED) } } } - override def cancel(): Unit = state.synchronized { + override def cancel(): Unit = withLockRequired { if (!isClosedOrCanceled) { setState(OperationState.CANCELED) MetricsSystem.tracing(_.decCount(MetricRegistry.name(OPERATION_OPEN, opType))) @@ -123,17 +138,10 @@ abstract class KyuubiOperation(session: Session) extends AbstractOperation(sessi } } - override def close(): Unit = state.synchronized { + override def close(): Unit = withLockRequired { if (!isClosedOrCanceled) { setState(OperationState.CLOSED) MetricsSystem.tracing(_.decCount(MetricRegistry.name(OPERATION_OPEN, opType))) - try { - // For launch engine operation, we use OperationLog to pass engine submit log but - // at that time we do not have remoteOpHandle - getOperationLog.foreach(_.close()) - } catch { - case e: IOException => error(e.getMessage, e) - } if (_remoteOpHandle != null) { try { client.closeOperation(_remoteOpHandle) @@ -143,6 +151,13 @@ abstract class KyuubiOperation(session: Session) extends AbstractOperation(sessi } } } + try { + // For launch engine operation, we use OperationLog to pass engine submit log but + // at that time we do not have remoteOpHandle + getOperationLog.foreach(_.close()) + } catch { + case e: IOException => error(e.getMessage, e) + } } override def getResultSetMetadata: TGetResultSetMetadataResp = { @@ -164,11 +179,17 @@ abstract class KyuubiOperation(session: Session) extends AbstractOperation(sessi } } - override def getNextRowSet(order: FetchOrientation, rowSetSize: Int): TRowSet = { + override def getNextRowSetInternal( + order: FetchOrientation, + rowSetSize: Int): TFetchResultsResp = { validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) - client.fetchResults(_remoteOpHandle, order, rowSetSize, fetchLog = false) + val rowset = client.fetchResults(_remoteOpHandle, order, rowSetSize, fetchLog = false) + val resp = new TFetchResultsResp(OK_STATUS) + resp.setResults(rowset) + resp.setHasMoreRows(false) + resp } override def shouldRunAsync: Boolean = false diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala index dd4889653cf..739c99cd78a 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.operation import java.util.concurrent.TimeUnit -import org.apache.hive.service.rpc.thrift.TRowSet +import org.apache.hive.service.rpc.thrift.{TFetchResultsResp, TStatus, TStatusCode} import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.config.KyuubiConf @@ -28,7 +28,7 @@ import org.apache.kyuubi.metrics.MetricsConstants.OPERATION_OPEN import org.apache.kyuubi.metrics.MetricsSystem import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.server.metadata.api.Metadata -import org.apache.kyuubi.session.{KyuubiBatchSessionImpl, KyuubiSessionImpl, Session} +import 
org.apache.kyuubi.session.{KyuubiBatchSession, KyuubiSessionImpl, Session} import org.apache.kyuubi.sql.plan.command.RunnableCommand import org.apache.kyuubi.util.ThriftUtils @@ -74,14 +74,14 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam } def newBatchJobSubmissionOperation( - session: KyuubiBatchSessionImpl, + session: KyuubiBatchSession, batchType: String, batchName: String, resource: String, className: String, batchConf: Map[String, String], batchArgs: Seq[String], - recoveryMetadata: Option[Metadata]): BatchJobSubmission = { + metadata: Option[Metadata]): BatchJobSubmission = { val operation = new BatchJobSubmission( session, batchType, @@ -90,7 +90,7 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam className, batchConf, batchArgs, - recoveryMetadata) + metadata) addOperation(operation) operation } @@ -212,12 +212,12 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam override def getOperationLogRowSet( opHandle: OperationHandle, order: FetchOrientation, - maxRows: Int): TRowSet = { - + maxRows: Int): TFetchResultsResp = { + val resp = new TFetchResultsResp(new TStatus(TStatusCode.SUCCESS_STATUS)) val operation = getOperation(opHandle).asInstanceOf[KyuubiOperation] val operationLog = operation.getOperationLog - operationLog match { - case Some(log) => log.read(maxRows) + val rowSet = operationLog match { + case Some(log) => log.read(order, maxRows) case None => val remoteHandle = operation.remoteOpHandle() val client = operation.client @@ -227,6 +227,9 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam ThriftUtils.EMPTY_ROW_SET } } + resp.setResults(rowSet) + resp.setHasMoreRows(false) + resp } override def start(): Unit = synchronized { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/LaunchEngine.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/LaunchEngine.scala index 3d9b4937fd5..758dccb9d1b 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/LaunchEngine.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/LaunchEngine.scala @@ -33,7 +33,7 @@ class LaunchEngine(session: KyuubiSessionImpl, override val shouldRunAsync: Bool } override def getOperationLog: Option[OperationLog] = Option(_operationLog) - override protected def currentApplicationInfo: Option[ApplicationInfo] = { + override protected def currentApplicationInfo(): Option[ApplicationInfo] = { Option(client).map { cli => ApplicationInfo( cli.engineId.orNull, @@ -68,4 +68,9 @@ class LaunchEngine(session: KyuubiSessionImpl, override val shouldRunAsync: Bool if (!shouldRunAsync) getBackgroundHandle.get() } + + override protected def applicationInfoMap: Option[Map[String, String]] = { + super.applicationInfoMap.map { _ + ("refId" -> session.engine.getEngineRefId()) } + } + } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/plugin/PluginLoader.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/plugin/PluginLoader.scala index 17ad6952425..da4c8e4a9d1 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/plugin/PluginLoader.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/plugin/PluginLoader.scala @@ -21,6 +21,7 @@ import scala.util.control.NonFatal import org.apache.kyuubi.KyuubiException import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.util.reflect.DynConstructors private[kyuubi] object PluginLoader { @@ -31,8 +32,7 @@ private[kyuubi] object PluginLoader { } try { - 
Class.forName(advisorClass.get).getConstructor().newInstance() - .asInstanceOf[SessionConfAdvisor] + DynConstructors.builder.impl(advisorClass.get).buildChecked[SessionConfAdvisor].newInstance() } catch { case _: ClassCastException => throw new KyuubiException( @@ -45,8 +45,7 @@ private[kyuubi] object PluginLoader { def loadGroupProvider(conf: KyuubiConf): GroupProvider = { val groupProviderClass = conf.get(KyuubiConf.GROUP_PROVIDER) try { - Class.forName(groupProviderClass).getConstructor().newInstance() - .asInstanceOf[GroupProvider] + DynConstructors.builder().impl(groupProviderClass).buildChecked[GroupProvider]().newInstance() } catch { case _: ClassCastException => throw new KyuubiException( diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/BackendServiceMetric.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/BackendServiceMetric.scala index 68bf11d7f99..9da4b78c036 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/BackendServiceMetric.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/BackendServiceMetric.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.server import org.apache.hive.service.rpc.thrift._ import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} -import org.apache.kyuubi.operation.{OperationHandle, OperationStatus} +import org.apache.kyuubi.operation.{KyuubiOperation, OperationHandle, OperationStatus} import org.apache.kyuubi.operation.FetchOrientation.FetchOrientation import org.apache.kyuubi.service.BackendService import org.apache.kyuubi.session.SessionHandle @@ -183,9 +183,10 @@ trait BackendServiceMetric extends BackendService { operationHandle: OperationHandle, orientation: FetchOrientation, maxRows: Int, - fetchLog: Boolean): TRowSet = { + fetchLog: Boolean): TFetchResultsResp = { MetricsSystem.timerTracing(MetricsConstants.BS_FETCH_RESULTS) { - val rowSet = super.fetchResults(operationHandle, orientation, maxRows, fetchLog) + val fetchResultsResp = super.fetchResults(operationHandle, orientation, maxRows, fetchLog) + val rowSet = fetchResultsResp.getResults // TODO: the statistics are wrong when we enabled the arrow. val rowsSize = if (rowSet.getColumnsSize > 0) { @@ -207,7 +208,17 @@ trait BackendServiceMetric extends BackendService { else MetricsConstants.BS_FETCH_RESULT_ROWS_RATE, rowsSize)) - rowSet + val operation = sessionManager.operationManager + .getOperation(operationHandle) + .asInstanceOf[KyuubiOperation] + + if (fetchLog) { + operation.increaseFetchLogCount(rowsSize) + } else { + operation.increaseFetchResultsCount(rowsSize) + } + + fetchResultsResp } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiBatchService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiBatchService.scala new file mode 100644 index 00000000000..2bfbbce2ab7 --- /dev/null +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiBatchService.scala @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.server + +import java.util.concurrent.atomic.AtomicBoolean + +import org.apache.kyuubi.config.KyuubiConf.BATCH_SUBMITTER_THREADS +import org.apache.kyuubi.engine.ApplicationState +import org.apache.kyuubi.operation.OperationState +import org.apache.kyuubi.server.metadata.MetadataManager +import org.apache.kyuubi.service.{AbstractService, Serverable} +import org.apache.kyuubi.session.KyuubiSessionManager +import org.apache.kyuubi.util.ThreadUtils + +class KyuubiBatchService( + server: Serverable, + sessionManager: KyuubiSessionManager) + extends AbstractService(classOf[KyuubiBatchService].getSimpleName) { + + private lazy val restFrontend = server.frontendServices + .filter(_.isInstanceOf[KyuubiRestFrontendService]) + .head + + private def kyuubiInstance: String = restFrontend.connectionUrl + + // TODO expose metrics, including pending/running/succeeded/failed batches + // TODO handle dangling batches, e.g. batch is picked and changed state to pending, + // but the Server crashed before submitting or updating status to metastore + + private lazy val metadataManager: MetadataManager = sessionManager.metadataManager.get + private val running: AtomicBoolean = new AtomicBoolean(false) + private lazy val batchExecutor = ThreadUtils + .newDaemonFixedThreadPool(conf.get(BATCH_SUBMITTER_THREADS), "kyuubi-batch-submitter") + + def cancelUnscheduledBatch(batchId: String): Boolean = { + metadataManager.cancelUnscheduledBatch(batchId) + } + + def countBatch( + batchType: String, + batchUser: Option[String], + batchState: Option[String] = None, + kyuubiInstance: Option[String] = None): Int = { + metadataManager.countBatch( + batchType, + batchUser.orNull, + batchState.orNull, + kyuubiInstance.orNull) + } + + override def start(): Unit = { + assert(running.compareAndSet(false, true)) + val submitTask: Runnable = () => { + while (running.get) { + metadataManager.pickBatchForSubmitting(kyuubiInstance) match { + case None => Thread.sleep(1000) + case Some(metadata) => + val batchId = metadata.identifier + info(s"$batchId is picked for submission.") + val batchSession = sessionManager.createBatchSession( + metadata.username, + "anonymous", + metadata.ipAddress, + metadata.requestConf, + metadata.engineType, + Option(metadata.requestName), + metadata.resource, + metadata.className, + metadata.requestArgs, + Some(metadata), + fromRecovery = false) + sessionManager.openBatchSession(batchSession) + var submitted = false + while (!submitted) { // block until batch job submitted + submitted = metadataManager.getBatchSessionMetadata(batchId) match { + case Some(metadata) if OperationState.isTerminal(metadata.opState) => + true + case Some(metadata) if metadata.opState == OperationState.RUNNING => + metadata.appState match { + // app that is not submitted to resource manager + case None | Some(ApplicationState.NOT_FOUND) => false + // app that is pending in resource manager + case Some(ApplicationState.PENDING) => false + // not sure, added for safe + case Some(ApplicationState.UNKNOWN) => false + case _ => true + } + case Some(_) => + false + case None => + 
error(s"$batchId does not existed in metastore, assume it is finished") + true + } + if (!submitted) Thread.sleep(1000) + } + info(s"$batchId is submitted or finished.") + } + } + } + (0 until batchExecutor.getCorePoolSize).foreach(_ => batchExecutor.submit(submitTask)) + super.start() + } + + override def stop(): Unit = { + super.stop() + if (running.compareAndSet(true, false)) { + ThreadUtils.shutdown(batchExecutor) + } + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendService.scala index 96a2114aa95..1a449dde4f1 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendService.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendService.scala @@ -94,7 +94,10 @@ class KyuubiMySQLFrontendService(override val serverable: Serverable) override def connectionUrl: String = { checkInitialized() - s"${serverAddr.getCanonicalHostName}:$port" + conf.get(FRONTEND_ADVERTISED_HOST) match { + case Some(advertisedHost) => s"$advertisedHost:$port" + case None => s"${serverAddr.getCanonicalHostName}:$port" + } } override def start(): Unit = synchronized { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiRestFrontendService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiRestFrontendService.scala index 7019d8a6a4a..28dfab731fd 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiRestFrontendService.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiRestFrontendService.scala @@ -52,32 +52,40 @@ class KyuubiRestFrontendService(override val serverable: Serverable) private def hadoopConf: Configuration = KyuubiServer.getHadoopConf() - private def sessionManager = be.sessionManager.asInstanceOf[KyuubiSessionManager] + private[kyuubi] def sessionManager = be.sessionManager.asInstanceOf[KyuubiSessionManager] private val batchChecker = ThreadUtils.newDaemonSingleThreadScheduledExecutor("batch-checker") lazy val host: String = conf.get(FRONTEND_REST_BIND_HOST) .getOrElse { - if (conf.get(KyuubiConf.FRONTEND_CONNECTION_URL_USE_HOSTNAME)) { + if (Utils.isWindows || Utils.isMac) { + warn(s"Kyuubi Server run in Windows or Mac environment, binding $getName to 0.0.0.0") + "0.0.0.0" + } else if (conf.get(KyuubiConf.FRONTEND_CONNECTION_URL_USE_HOSTNAME)) { Utils.findLocalInetAddress.getCanonicalHostName } else { Utils.findLocalInetAddress.getHostAddress } } + private lazy val port: Int = conf.get(FRONTEND_REST_BIND_PORT) + override def initialize(conf: KyuubiConf): Unit = synchronized { this.conf = conf server = JettyServer( getName, host, - conf.get(FRONTEND_REST_BIND_PORT), + port, conf.get(FRONTEND_REST_MAX_WORKER_THREADS)) super.initialize(conf) } override def connectionUrl: String = { checkInitialized() - server.getServerUri + conf.get(FRONTEND_ADVERTISED_HOST) match { + case Some(advertisedHost) => s"$advertisedHost:$port" + case None => server.getServerUri + } } private def startInternal(): Unit = { @@ -87,6 +95,9 @@ class KyuubiRestFrontendService(override val serverable: Serverable) val authenticationFactory = new KyuubiHttpAuthenticationFactory(conf) server.addHandler(authenticationFactory.httpHandlerWrapperFactory.wrapHandler(contextHandler)) + val proxyHandler = ApiRootResource.getEngineUIProxyHandler(this) + server.addHandler(authenticationFactory.httpHandlerWrapperFactory.wrapHandler(proxyHandler)) + 
server.addStaticHandler("org/apache/kyuubi/ui/static", "/static/") server.addRedirectHandler("/", "/static/") server.addRedirectHandler("/static", "/static/") @@ -117,7 +128,7 @@ class KyuubiRestFrontendService(override val serverable: Serverable) sessionManager.getPeerInstanceClosedBatchSessions(connectionUrl).foreach { batch => Utils.tryLogNonFatalError { val sessionHandle = SessionHandle.fromUUID(batch.identifier) - Option(sessionManager.getBatchSessionImpl(sessionHandle)).foreach(_.close()) + sessionManager.getBatchSession(sessionHandle).foreach(_.close()) } } } catch { @@ -172,16 +183,22 @@ class KyuubiRestFrontendService(override val serverable: Serverable) if (!isStarted.get) { try { server.start() - recoverBatchSessions() isStarted.set(true) - info(s"$getName has started at ${server.getServerUri}") startBatchChecker() startInternal() + // block until the HTTP server is started, otherwise, we may get + // the wrong HTTP server port -1 + while (server.getState != "STARTED") { + info(s"Waiting for $getName's HTTP server getting started") + Thread.sleep(1000) + } + recoverBatchSessions() } catch { case e: Exception => throw new KyuubiException(s"Cannot start $getName", e) } } super.start() + info(s"Exposing REST endpoint at: http://${server.getServerUri}") } override def stop(): Unit = synchronized { @@ -229,7 +246,9 @@ class KyuubiRestFrontendService(override val serverable: Serverable) realUser } else { sessionConf.get(KyuubiAuthenticationFactory.HS2_PROXY_USER).map { proxyUser => - KyuubiAuthenticationFactory.verifyProxyAccess(realUser, proxyUser, ipAddress, hadoopConf) + if (!getConf.get(KyuubiConf.SERVER_ADMINISTRATORS).contains(realUser)) { + KyuubiAuthenticationFactory.verifyProxyAccess(realUser, proxyUser, ipAddress, hadoopConf) + } proxyUser }.getOrElse(realUser) } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala index e81240a96eb..453ae0b7904 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala @@ -25,7 +25,7 @@ import org.apache.hadoop.security.UserGroupInformation import org.apache.kyuubi._ import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.config.KyuubiConf.{FRONTEND_PROTOCOLS, FrontendProtocols} +import org.apache.kyuubi.config.KyuubiConf.{BATCH_SUBMITTER_ENABLED, FRONTEND_PROTOCOLS, FrontendProtocols, KYUUBI_KUBERNETES_CONF_PREFIX} import org.apache.kyuubi.config.KyuubiConf.FrontendProtocols._ import org.apache.kyuubi.events.{EventBus, KyuubiServerInfoEvent, ServerEventHandlerRegister} import org.apache.kyuubi.ha.HighAvailabilityConf._ @@ -38,17 +38,20 @@ import org.apache.kyuubi.util.{KyuubiHadoopUtils, SignalRegister} import org.apache.kyuubi.zookeeper.EmbeddedZookeeper object KyuubiServer extends Logging { - private val zkServer = new EmbeddedZookeeper() private[kyuubi] var kyuubiServer: KyuubiServer = _ @volatile private[kyuubi] var hadoopConf: Configuration = _ def startServer(conf: KyuubiConf): KyuubiServer = { hadoopConf = KyuubiHadoopUtils.newHadoopConf(conf) + var embeddedZkServer: Option[EmbeddedZookeeper] = None if (!ServiceDiscovery.supportServiceDiscovery(conf)) { - zkServer.initialize(conf) - zkServer.start() - conf.set(HA_ADDRESSES, zkServer.getConnectString) - conf.set(HA_ZK_AUTH_TYPE, AuthTypes.NONE.toString) + embeddedZkServer = Some(new EmbeddedZookeeper()) + embeddedZkServer.foreach(zkServer => { + 
zkServer.initialize(conf) + zkServer.start() + conf.set(HA_ADDRESSES, zkServer.getConnectString) + conf.set(HA_ZK_AUTH_TYPE, AuthTypes.NONE.toString) + }) } val server = conf.get(KyuubiConf.SERVER_NAME) match { @@ -59,9 +62,7 @@ object KyuubiServer extends Logging { server.initialize(conf) } catch { case e: Exception => - if (zkServer.getServiceState == ServiceState.STARTED) { - zkServer.stop() - } + embeddedZkServer.filter(_.getServiceState == ServiceState.STARTED).foreach(_.stop()) throw e } server.start() @@ -111,14 +112,29 @@ object KyuubiServer extends Logging { private[kyuubi] def refreshUserDefaultsConf(): Unit = kyuubiServer.conf.synchronized { val existedUserDefaults = kyuubiServer.conf.getAllUserDefaults val refreshedUserDefaults = KyuubiConf().loadFileDefaults().getAllUserDefaults + refreshConfig("user defaults", existedUserDefaults, refreshedUserDefaults) + } + + private[kyuubi] def refreshKubernetesConf(): Unit = kyuubiServer.conf.synchronized { + val existedKubernetesConf = + kyuubiServer.conf.getAll.filter(_._1.startsWith(KYUUBI_KUBERNETES_CONF_PREFIX)) + val refreshedKubernetesConf = + KyuubiConf().loadFileDefaults().getAll.filter(_._1.startsWith(KYUUBI_KUBERNETES_CONF_PREFIX)) + refreshConfig("kubernetes", existedKubernetesConf, refreshedKubernetesConf) + } + + private def refreshConfig( + configDomain: String, + existing: Map[String, String], + refreshed: Map[String, String]): Unit = { var (unsetCount, updatedCount, addedCount) = (0, 0, 0) - for ((k, _) <- existedUserDefaults if !refreshedUserDefaults.contains(k)) { + for ((k, _) <- existing if !refreshed.contains(k)) { kyuubiServer.conf.unset(k) unsetCount = unsetCount + 1 } - for ((k, v) <- refreshedUserDefaults) { - if (existedUserDefaults.contains(k)) { - if (!StringUtils.equals(existedUserDefaults.get(k).orNull, v)) { + for ((k, v) <- refreshed) { + if (existing.contains(k)) { + if (!StringUtils.equals(existing.get(k).orNull, v)) { updatedCount = updatedCount + 1 } } else { @@ -126,17 +142,25 @@ object KyuubiServer extends Logging { } kyuubiServer.conf.set(k, v) } - info(s"Refreshed user defaults configs with changes of " + + info(s"Refreshed $configDomain configs with changes of " + s"unset: $unsetCount, updated: $updatedCount, added: $addedCount") } private[kyuubi] def refreshUnlimitedUsers(): Unit = synchronized { val sessionMgr = kyuubiServer.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] - val existingUnlimitedUsers = sessionMgr.getUnlimitedUsers() + val existingUnlimitedUsers = sessionMgr.getUnlimitedUsers sessionMgr.refreshUnlimitedUsers(KyuubiConf().loadFileDefaults()) - val refreshedUnlimitedUsers = sessionMgr.getUnlimitedUsers() + val refreshedUnlimitedUsers = sessionMgr.getUnlimitedUsers info(s"Refreshed unlimited users from $existingUnlimitedUsers to $refreshedUnlimitedUsers") } + + private[kyuubi] def refreshDenyUsers(): Unit = synchronized { + val sessionMgr = kyuubiServer.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] + val existingDenyUsers = sessionMgr.getDenyUsers + sessionMgr.refreshDenyUsers(KyuubiConf().loadFileDefaults()) + val refreshedDenyUsers = sessionMgr.getDenyUsers + info(s"Refreshed deny users from $existingDenyUsers to $refreshedDenyUsers") + } } class KyuubiServer(name: String) extends Serverable(name) { @@ -157,15 +181,13 @@ class KyuubiServer(name: String) extends Serverable(name) { warn("MYSQL frontend protocol is experimental.") new KyuubiMySQLFrontendService(this) case TRINO => - warn("Trio frontend protocol is experimental.") + warn("Trino 
frontend protocol is experimental.") new KyuubiTrinoFrontendService(this) case other => throw new UnsupportedOperationException(s"Frontend protocol $other is not supported yet.") } override def initialize(conf: KyuubiConf): Unit = synchronized { - initLoggerEventHandler(conf) - val kinit = new KinitAuxiliaryService() addService(kinit) @@ -175,7 +197,15 @@ class KyuubiServer(name: String) extends Serverable(name) { if (conf.get(MetricsConf.METRICS_ENABLED)) { addService(new MetricsSystem) } + + if (conf.isRESTEnabled && conf.get(BATCH_SUBMITTER_ENABLED)) { + addService(new KyuubiBatchService( + this, + backendService.sessionManager.asInstanceOf[KyuubiSessionManager])) + } super.initialize(conf) + + initLoggerEventHandler(conf) } override def start(): Unit = { @@ -193,5 +223,7 @@ class KyuubiServer(name: String) extends Serverable(name) { ServerEventHandlerRegister.registerEventLoggers(conf) } - override protected def stopServer(): Unit = {} + override protected def stopServer(): Unit = { + EventBus.deregisterAll() + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendService.scala index 069bc63e2b0..ae388a7c42a 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendService.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendService.scala @@ -92,6 +92,8 @@ final class KyuubiTBinaryFrontendService( KYUUBI_SESSION_ENGINE_LAUNCH_HANDLE_SECRET, Base64.getMimeEncoder.encodeToString(opHandleIdentifier.getSecret)) + respConfiguration.put(KYUUBI_SESSION_ENGINE_LAUNCH_SUPPORT_RESULT, true.toString) + resp.setSessionHandle(sessionHandle.toTSessionHandle) resp.setConfiguration(respConfiguration) resp.setStatus(OK_STATUS) diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTHttpFrontendService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTHttpFrontendService.scala index 63933aa7724..79351118c50 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTHttpFrontendService.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTHttpFrontendService.scala @@ -278,7 +278,11 @@ final class KyuubiTHttpFrontendService( val realUser = getShortName(Option(SessionManager.getUserName).getOrElse(req.getUsername)) // using the remote ip address instead of that in proxy http header for authentication val ipAddress: String = SessionManager.getIpAddress - val sessionUser: String = getProxyUser(req.getConfiguration, ipAddress, realUser) + val sessionUser: String = if (req.getConfiguration == null) { + realUser + } else { + getProxyUser(req.getConfiguration, ipAddress, realUser) + } debug(s"Client's real user: $realUser, session user: $sessionUser") realUser -> sessionUser } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTrinoFrontendService.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTrinoFrontendService.scala index 573bb948f90..95f6d590265 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTrinoFrontendService.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiTrinoFrontendService.scala @@ -21,7 +21,7 @@ import java.util.concurrent.atomic.AtomicBoolean import org.apache.kyuubi.{KyuubiException, Utils} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.config.KyuubiConf.{FRONTEND_TRINO_BIND_HOST, FRONTEND_TRINO_BIND_PORT, 
FRONTEND_TRINO_MAX_WORKER_THREADS} +import org.apache.kyuubi.config.KyuubiConf.{FRONTEND_ADVERTISED_HOST, FRONTEND_TRINO_BIND_HOST, FRONTEND_TRINO_BIND_PORT, FRONTEND_TRINO_MAX_WORKER_THREADS} import org.apache.kyuubi.server.trino.api.v1.ApiRootResource import org.apache.kyuubi.server.ui.JettyServer import org.apache.kyuubi.service.{AbstractFrontendService, Serverable, Service} @@ -46,19 +46,24 @@ class KyuubiTrinoFrontendService(override val serverable: Serverable) } } + private lazy val port: Int = conf.get(FRONTEND_TRINO_BIND_PORT) + override def initialize(conf: KyuubiConf): Unit = synchronized { this.conf = conf server = JettyServer( getName, host, - conf.get(FRONTEND_TRINO_BIND_PORT), + port, conf.get(FRONTEND_TRINO_MAX_WORKER_THREADS)) super.initialize(conf) } override def connectionUrl: String = { checkInitialized() - server.getServerUri + conf.get(FRONTEND_ADVERTISED_HOST) match { + case Some(advertisedHost) => s"$advertisedHost:$port" + case None => server.getServerUri + } } private def startInternal(): Unit = { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala new file mode 100644 index 00000000000..5aaf4d7780f --- /dev/null +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.server.api + +import scala.collection.JavaConverters._ + +import org.apache.kyuubi.{Logging, Utils} +import org.apache.kyuubi.client.api.v1.dto.{OperationData, ServerData, SessionData} +import org.apache.kyuubi.events.KyuubiOperationEvent +import org.apache.kyuubi.ha.client.ServiceNodeInfo +import org.apache.kyuubi.operation.KyuubiOperation +import org.apache.kyuubi.session.KyuubiSession + +object ApiUtils extends Logging { + + def sessionData(session: KyuubiSession): SessionData = { + val sessionEvent = session.getSessionEvent + new SessionData( + session.handle.identifier.toString, + session.user, + session.ipAddress, + session.conf.asJava, + session.createTime, + session.lastAccessTime - session.createTime, + session.getNoOperationTime, + sessionEvent.flatMap(_.exception).map(Utils.prettyPrint).getOrElse(""), + session.sessionType.toString, + session.connectionUrl, + sessionEvent.map(_.engineId).getOrElse("")) + } + + def operationData(operation: KyuubiOperation): OperationData = { + val opEvent = KyuubiOperationEvent(operation) + new OperationData( + opEvent.statementId, + opEvent.statement, + opEvent.state, + opEvent.createTime, + opEvent.startTime, + opEvent.completeTime, + opEvent.exception.map(Utils.prettyPrint).getOrElse(""), + opEvent.sessionId, + opEvent.sessionUser, + opEvent.sessionType, + operation.getSession.asInstanceOf[KyuubiSession].connectionUrl, + operation.metrics.asJava) + } + + def serverData(nodeInfo: ServiceNodeInfo): ServerData = { + new ServerData( + nodeInfo.nodeName, + nodeInfo.namespace, + nodeInfo.instance, + nodeInfo.host, + nodeInfo.port, + nodeInfo.attributes.asJava, + "Running") + } + + def logAndRefineErrorMsg(errorMsg: String, throwable: Throwable): String = { + error(errorMsg, throwable) + s"$errorMsg: ${Utils.prettyPrint(throwable)}" + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/EngineUIProxyServlet.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/EngineUIProxyServlet.scala new file mode 100644 index 00000000000..021a2ad85ed --- /dev/null +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/EngineUIProxyServlet.scala @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.server.api + +import java.net.URL +import javax.servlet.http.HttpServletRequest + +import org.apache.commons.lang3.StringUtils +import org.eclipse.jetty.client.api.Request +import org.eclipse.jetty.proxy.ProxyServlet + +import org.apache.kyuubi.Logging + +private[api] class EngineUIProxyServlet extends ProxyServlet with Logging { + + override def rewriteTarget(request: HttpServletRequest): String = { + val requestURL = request.getRequestURL + val requestURI = request.getRequestURI + var targetURL = "/no-ui-error" + extractTargetAddress(requestURI).foreach { case (host, port) => + val targetURI = requestURI.stripPrefix(s"/engine-ui/$host:$port") match { + // for some reason, the proxy can not handle redirect well, as a workaround, + // we simulate the Spark UI redirection behavior and forcibly rewrite the + // empty URI to the Spark Jobs page. + case "" | "/" => "/jobs/" + case path => path + } + val targetQueryString = + Option(request.getQueryString).filter(StringUtils.isNotEmpty).map(q => s"?$q").getOrElse("") + targetURL = new URL("http", host, port, targetURI + targetQueryString).toString + } + debug(s"rewrite $requestURL => $targetURL") + targetURL + } + + override def addXForwardedHeaders( + clientRequest: HttpServletRequest, + proxyRequest: Request): Unit = { + val requestURI = clientRequest.getRequestURI + extractTargetAddress(requestURI).foreach { case (host, port) => + // SPARK-24209: Knox uses X-Forwarded-Context to notify the application the base path + proxyRequest.header("X-Forwarded-Context", s"/engine-ui/$host:$port") + } + super.addXForwardedHeaders(clientRequest, proxyRequest) + } + + private val r = "^/engine-ui/([^/:]+):(\\d+)/?.*".r + private def extractTargetAddress(requestURI: String): Option[(String, Int)] = + requestURI match { + case r(host, port) => Some(host -> port.toInt) + case _ => None + } +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala index 776c35ba731..724da120999 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala @@ -19,11 +19,13 @@ package org.apache.kyuubi.server.api import javax.ws.rs.ext.ContextResolver -import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper} import com.fasterxml.jackson.module.scala.DefaultScalaModule class KyuubiScalaObjectMapper extends ContextResolver[ObjectMapper] { - private val mapper = new ObjectMapper().registerModule(DefaultScalaModule) + private val mapper = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) + .registerModule(DefaultScalaModule) override def getContext(aClass: Class[_]): ObjectMapper = mapper } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/api.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/api.scala index deadcf9abe4..93953a577dc 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/api.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/api.scala @@ -25,7 +25,8 @@ import javax.ws.rs.ext.{ExceptionMapper, Provider} import org.eclipse.jetty.server.handler.ContextHandler -import org.apache.kyuubi.server.KyuubiRestFrontendService +import org.apache.kyuubi.Logging +import 
org.apache.kyuubi.server.{KyuubiBatchService, KyuubiRestFrontendService, KyuubiServer} private[api] trait ApiRequestContext { @@ -35,22 +36,28 @@ private[api] trait ApiRequestContext { @Context protected var httpRequest: HttpServletRequest = _ + protected lazy val batchService: Option[KyuubiBatchService] = + KyuubiServer.kyuubiServer.getServices + .find(_.isInstanceOf[KyuubiBatchService]) + .map(_.asInstanceOf[KyuubiBatchService]) + final protected def fe: KyuubiRestFrontendService = FrontendServiceContext.get(servletContext) } @Provider -class RestExceptionMapper extends ExceptionMapper[Exception] { +class RestExceptionMapper extends ExceptionMapper[Exception] with Logging { override def toResponse(exception: Exception): Response = { + warn("Error occurs on accessing REST API.", exception) exception match { case e: WebApplicationException => Response.status(e.getResponse.getStatus) - .`type`(e.getResponse.getMediaType) - .entity(e.getMessage) + .`type`(MediaType.APPLICATION_JSON) + .entity(Map("message" -> e.getMessage)) .build() case e => Response.status(Response.Status.INTERNAL_SERVER_ERROR) .`type`(MediaType.APPLICATION_JSON) - .entity(e.getMessage) + .entity(Map("message" -> e.getMessage)) .build() } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala index ceb7179b810..5f410ab7de9 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala @@ -27,25 +27,25 @@ import scala.collection.mutable.ListBuffer import io.swagger.v3.oas.annotations.media.{ArraySchema, Content, Schema} import io.swagger.v3.oas.annotations.responses.ApiResponse import io.swagger.v3.oas.annotations.tags.Tag -import org.apache.zookeeper.KeeperException.NoNodeException +import org.apache.commons.lang3.StringUtils import org.apache.kyuubi.{KYUUBI_VERSION, Logging, Utils} -import org.apache.kyuubi.client.api.v1.dto.{Engine, SessionData} +import org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ -import org.apache.kyuubi.events.KyuubiOperationEvent import org.apache.kyuubi.ha.HighAvailabilityConf.HA_NAMESPACE import org.apache.kyuubi.ha.client.{DiscoveryPaths, ServiceNodeInfo} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient import org.apache.kyuubi.operation.{KyuubiOperation, OperationHandle} import org.apache.kyuubi.server.KyuubiServer -import org.apache.kyuubi.server.api.ApiRequestContext -import org.apache.kyuubi.session.SessionHandle +import org.apache.kyuubi.server.api.{ApiRequestContext, ApiUtils} +import org.apache.kyuubi.session.{KyuubiSession, SessionHandle} +import org.apache.kyuubi.shaded.zookeeper.KeeperException.NoNodeException @Tag(name = "Admin") @Produces(Array(MediaType.APPLICATION_JSON)) private[v1] class AdminResource extends ApiRequestContext with Logging { - private lazy val administrators = fe.getConf.get(KyuubiConf.SERVER_ADMINISTRATORS).toSet + + private lazy val administrators = fe.getConf.get(KyuubiConf.SERVER_ADMINISTRATORS) + Utils.currentUser @ApiResponse( @@ -87,6 +87,25 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { Response.ok(s"Refresh the user defaults conf successfully.").build() } + @ApiResponse( + responseCode = "200", + content = Array(new Content(mediaType = MediaType.APPLICATION_JSON)), + description = 
"refresh the kubernetes configs") + @POST + @Path("refresh/kubernetes_conf") + def refreshKubernetesConf(): Response = { + val userName = fe.getSessionUser(Map.empty[String, String]) + val ipAddress = fe.getIpAddress + info(s"Receive refresh kubernetes conf request from $userName/$ipAddress") + if (!isAdministrator(userName)) { + throw new NotAllowedException( + s"$userName is not allowed to refresh the kubernetes conf") + } + info(s"Reloading kubernetes conf") + KyuubiServer.refreshKubernetesConf() + Response.ok(s"Refresh the kubernetes conf successfully.").build() + } + @ApiResponse( responseCode = "200", content = Array(new Content(mediaType = MediaType.APPLICATION_JSON)), @@ -106,6 +125,25 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { Response.ok(s"Refresh the unlimited users successfully.").build() } + @ApiResponse( + responseCode = "200", + content = Array(new Content(mediaType = MediaType.APPLICATION_JSON)), + description = "refresh the deny users") + @POST + @Path("refresh/deny_users") + def refreshDenyUser(): Response = { + val userName = fe.getSessionUser(Map.empty[String, String]) + val ipAddress = fe.getIpAddress + info(s"Receive refresh deny users request from $userName/$ipAddress") + if (!isAdministrator(userName)) { + throw new NotAllowedException( + s"$userName is not allowed to refresh the deny users") + } + info(s"Reloading deny users") + KyuubiServer.refreshDenyUsers() + Response.ok(s"Refresh the deny users successfully.").build() + } + @ApiResponse( responseCode = "200", content = Array(new Content( @@ -114,7 +152,7 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { description = "get the list of all live sessions") @GET @Path("sessions") - def sessions(): Seq[SessionData] = { + def sessions(@QueryParam("users") users: String): Seq[SessionData] = { val userName = fe.getSessionUser(Map.empty[String, String]) val ipAddress = fe.getIpAddress info(s"Received listing all live sessions request from $userName/$ipAddress") @@ -122,16 +160,12 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { throw new NotAllowedException( s"$userName is not allowed to list all live sessions") } - fe.be.sessionManager.allSessions().map { session => - new SessionData( - session.handle.identifier.toString, - session.user, - session.ipAddress, - session.conf.asJava, - session.createTime, - session.lastAccessTime - session.createTime, - session.getNoOperationTime) - }.toSeq + var sessions = fe.be.sessionManager.allSessions() + if (StringUtils.isNotBlank(users)) { + val usersSet = users.split(",").toSet + sessions = sessions.filter(session => usersSet.contains(session.user)) + } + sessions.map(session => ApiUtils.sessionData(session.asInstanceOf[KyuubiSession])).toSeq } @ApiResponse( @@ -157,12 +191,14 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { content = Array(new Content( mediaType = MediaType.APPLICATION_JSON, array = new ArraySchema(schema = new Schema(implementation = - classOf[KyuubiOperationEvent])))), + classOf[OperationData])))), description = - "get the list of all active operation events") + "get the list of all active operations") @GET @Path("operations") - def listOperations(): Seq[KyuubiOperationEvent] = { + def listOperations( + @QueryParam("users") users: String, + @QueryParam("sessionHandle") sessionHandle: String): Seq[OperationData] = { val userName = fe.getSessionUser(Map.empty[String, String]) val ipAddress = fe.getIpAddress info(s"Received listing all of the active 
operations request from $userName/$ipAddress") @@ -170,8 +206,17 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { throw new NotAllowedException( s"$userName is not allowed to list all the operations") } - fe.be.sessionManager.operationManager.allOperations() - .map(operation => KyuubiOperationEvent(operation.asInstanceOf[KyuubiOperation])).toSeq + var operations = fe.be.sessionManager.operationManager.allOperations() + if (StringUtils.isNotBlank(users)) { + val usersSet = users.split(",").toSet + operations = operations.filter(operation => usersSet.contains(operation.getSession.user)) + } + if (StringUtils.isNotBlank(sessionHandle)) { + operations = operations.filter(operation => + operation.getSession.handle.equals(SessionHandle.fromUUID(sessionHandle))) + } + operations + .map(operation => ApiUtils.operationData(operation.asInstanceOf[KyuubiOperation])).toSeq } @ApiResponse( @@ -204,7 +249,11 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { @QueryParam("sharelevel") shareLevel: String, @QueryParam("subdomain") subdomain: String, @QueryParam("hive.server2.proxy.user") hs2ProxyUser: String): Response = { - val userName = fe.getSessionUser(hs2ProxyUser) + val userName = if (isAdministrator(fe.getRealUser())) { + Option(hs2ProxyUser).getOrElse(fe.getRealUser()) + } else { + fe.getSessionUser(hs2ProxyUser) + } val engine = getEngine(userName, engineType, shareLevel, subdomain, "default") val engineSpace = getEngineSpace(engine) @@ -237,12 +286,60 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { @QueryParam("type") engineType: String, @QueryParam("sharelevel") shareLevel: String, @QueryParam("subdomain") subdomain: String, - @QueryParam("hive.server2.proxy.user") hs2ProxyUser: String): Seq[Engine] = { - val userName = fe.getSessionUser(hs2ProxyUser) + @QueryParam("hive.server2.proxy.user") hs2ProxyUser: String, + @QueryParam("all") @DefaultValue("false") all: String): Seq[Engine] = { + if (all.toBoolean) { + val userName = fe.getSessionUser(Map.empty[String, String]) + val ipAddress = fe.getIpAddress + info(s"Received list all kyuubi engine request from $userName/$ipAddress") + if (!isAdministrator(userName)) { + throw new NotAllowedException( + s"$userName is not allowed to list all kyuubi engine") + } + val engines = ListBuffer[Engine]() + val engineSpace = fe.getConf.get(HA_NAMESPACE) + val shareLevel = fe.getConf.get(ENGINE_SHARE_LEVEL) + val engineType = fe.getConf.get(ENGINE_TYPE) + withDiscoveryClient(fe.getConf) { discoveryClient => + val commonParent = s"/${engineSpace}_${KYUUBI_VERSION}_${shareLevel}_$engineType" + info(s"Listing engine nodes for $commonParent") + try { + discoveryClient.getChildren(commonParent).map { + user => + val engine = getEngine(user, engineType, shareLevel, "", "") + val engineSpace = getEngineSpace(engine) + discoveryClient.getChildren(engineSpace).map { child => + info(s"Listing engine nodes for $engineSpace/$child") + engines ++= discoveryClient.getServiceNodesInfo(s"$engineSpace/$child").map(node => + new Engine( + engine.getVersion, + engine.getUser, + engine.getEngineType, + engine.getSharelevel, + node.namespace.split("/").last, + node.instance, + node.namespace, + node.attributes.asJava)) + } + } + } catch { + case nne: NoNodeException => + error(s"No such engine for engine type: $engineType, share level: $shareLevel", nne) + throw new NotFoundException( + s"No such engine for engine type: $engineType, share level: $shareLevel") + } + } + return engines.toSeq + } + val 
userName = if (isAdministrator(fe.getRealUser())) { + Option(hs2ProxyUser).getOrElse(fe.getRealUser()) + } else { + fe.getSessionUser(hs2ProxyUser) + } val engine = getEngine(userName, engineType, shareLevel, subdomain, "") val engineSpace = getEngineSpace(engine) - var engineNodes = ListBuffer[ServiceNodeInfo]() + val engineNodes = ListBuffer[ServiceNodeInfo]() Option(subdomain).filter(_.nonEmpty) match { case Some(_) => withDiscoveryClient(fe.getConf) { discoveryClient => @@ -277,6 +374,36 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { node.instance, node.namespace, node.attributes.asJava)) + .toSeq + } + + @ApiResponse( + responseCode = "200", + content = Array( + new Content( + mediaType = MediaType.APPLICATION_JSON, + array = new ArraySchema(schema = new Schema(implementation = + classOf[OperationData])))), + description = "list all live kyuubi servers") + @GET + @Path("server") + def listServers(): Seq[ServerData] = { + val userName = fe.getSessionUser(Map.empty[String, String]) + val ipAddress = fe.getIpAddress + info(s"Received list all live kyuubi servers request from $userName/$ipAddress") + if (!isAdministrator(userName)) { + throw new NotAllowedException( + s"$userName is not allowed to list all live kyuubi servers") + } + val kyuubiConf = fe.getConf + val servers = ListBuffer[ServerData]() + val serverSpec = DiscoveryPaths.makePath(null, kyuubiConf.get(HA_NAMESPACE)) + withDiscoveryClient(kyuubiConf) { discoveryClient => + discoveryClient.getServiceNodesInfo(serverSpec).map(nodeInfo => { + servers += ApiUtils.serverData(nodeInfo) + }) + } + servers.toSeq } private def getEngine( @@ -309,13 +436,44 @@ private[v1] class AdminResource extends ApiRequestContext with Logging { private def getEngineSpace(engine: Engine): String = { val serverSpace = fe.getConf.get(HA_NAMESPACE) + val appUser = engine.getSharelevel match { + case "GROUP" => + fe.sessionManager.groupProvider.primaryGroup(engine.getUser, fe.getConf.getAll.asJava) + case _ => engine.getUser + } + DiscoveryPaths.makePath( s"${serverSpace}_${engine.getVersion}_${engine.getSharelevel}_${engine.getEngineType}", - engine.getUser, + appUser, engine.getSubdomain) } + @ApiResponse( + responseCode = "200", + content = Array(new Content( + mediaType = MediaType.APPLICATION_JSON, + schema = new Schema(implementation = classOf[Count]))), + description = "get the batch count") + @GET + @Path("batch/count") + def countBatch( + @QueryParam("batchType") @DefaultValue("SPARK") batchType: String, + @QueryParam("batchUser") batchUser: String, + @QueryParam("batchState") batchState: String): Count = { + val userName = fe.getSessionUser(Map.empty[String, String]) + val ipAddress = fe.getIpAddress + info(s"Received counting batches request from $userName/$ipAddress") + if (!isAdministrator(userName)) { + throw new NotAllowedException( + s"$userName is not allowed to count the batches") + } + val batchCount = batchService + .map(_.countBatch(batchType, Option(batchUser), Option(batchState))) + .getOrElse(0) + new Count(batchCount) + } + private def isAdministrator(userName: String): Boolean = { - administrators.contains(userName); + administrators.contains(userName) } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/ApiRootResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/ApiRootResource.scala index d8b997e865c..8abc23ff1bd 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/ApiRootResource.scala +++ 
b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/ApiRootResource.scala @@ -30,7 +30,7 @@ import org.glassfish.jersey.servlet.ServletContainer import org.apache.kyuubi.KYUUBI_VERSION import org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.server.KyuubiRestFrontendService -import org.apache.kyuubi.server.api.{ApiRequestContext, FrontendServiceContext, OpenAPIConfig} +import org.apache.kyuubi.server.api.{ApiRequestContext, EngineUIProxyServlet, FrontendServiceContext, OpenAPIConfig} @Path("/v1") private[v1] class ApiRootResource extends ApiRequestContext { @@ -82,4 +82,13 @@ private[server] object ApiRootResource { handler.addServlet(holder, "/*") handler } + + def getEngineUIProxyHandler(fe: KyuubiRestFrontendService): ServletContextHandler = { + val proxyServlet = new EngineUIProxyServlet() + val holder = new ServletHolder(proxyServlet) + val proxyHandler = new ServletContextHandler(ServletContextHandler.NO_SESSIONS) + proxyHandler.setContextPath("/engine-ui") + proxyHandler.addServlet(holder, "/*") + proxyHandler + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala index edfc056168c..76d913a98c7 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala @@ -38,15 +38,16 @@ import org.apache.kyuubi.{Logging, Utils} import org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.client.exception.KyuubiRestException import org.apache.kyuubi.client.util.BatchUtils._ -import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiReservedKeys._ -import org.apache.kyuubi.engine.{ApplicationInfo, KyuubiApplicationManager} +import org.apache.kyuubi.engine.{ApplicationInfo, ApplicationManagerInfo, KillResponse, KyuubiApplicationManager} import org.apache.kyuubi.operation.{BatchJobSubmission, FetchOrientation, OperationState} +import org.apache.kyuubi.server.KyuubiServer import org.apache.kyuubi.server.api.ApiRequestContext import org.apache.kyuubi.server.api.v1.BatchesResource._ import org.apache.kyuubi.server.metadata.MetadataManager -import org.apache.kyuubi.server.metadata.api.Metadata -import org.apache.kyuubi.session.{KyuubiBatchSessionImpl, KyuubiSessionManager, SessionHandle} +import org.apache.kyuubi.server.metadata.api.{Metadata, MetadataFilter} +import org.apache.kyuubi.session.{KyuubiBatchSession, KyuubiSessionManager, SessionHandle, SessionType} import org.apache.kyuubi.util.JdbcUtils @Tag(name = "Batch") @@ -54,45 +55,38 @@ import org.apache.kyuubi.util.JdbcUtils private[v1] class BatchesResource extends ApiRequestContext with Logging { private val internalRestClients = new ConcurrentHashMap[String, InternalRestClient]() private lazy val internalSocketTimeout = - fe.getConf.get(KyuubiConf.BATCH_INTERNAL_REST_CLIENT_SOCKET_TIMEOUT) + fe.getConf.get(BATCH_INTERNAL_REST_CLIENT_SOCKET_TIMEOUT).toInt private lazy val internalConnectTimeout = - fe.getConf.get(KyuubiConf.BATCH_INTERNAL_REST_CLIENT_CONNECT_TIMEOUT) + fe.getConf.get(BATCH_INTERNAL_REST_CLIENT_CONNECT_TIMEOUT).toInt + + private def batchV2Enabled(reqConf: Map[String, String]): Boolean = { + KyuubiServer.kyuubiServer.getConf.get(BATCH_SUBMITTER_ENABLED) && + reqConf.getOrElse(BATCH_IMPL_VERSION.key, fe.getConf.get(BATCH_IMPL_VERSION)) == "2" + } private def 
getInternalRestClient(kyuubiInstance: String): InternalRestClient = { internalRestClients.computeIfAbsent( kyuubiInstance, - kyuubiInstance => { - new InternalRestClient( - kyuubiInstance, - internalSocketTimeout.toInt, - internalConnectTimeout.toInt) - }) + k => new InternalRestClient(k, internalSocketTimeout, internalConnectTimeout)) } private def sessionManager = fe.be.sessionManager.asInstanceOf[KyuubiSessionManager] - private def buildBatch(session: KyuubiBatchSessionImpl): Batch = { + private def buildBatch(session: KyuubiBatchSession): Batch = { val batchOp = session.batchJobSubmissionOp val batchOpStatus = batchOp.getStatus - val batchAppStatus = batchOp.getOrFetchCurrentApplicationInfo - - val name = Option(batchOp.batchName).getOrElse(batchAppStatus.map(_.name).orNull) - var appId: String = null - var appUrl: String = null - var appState: String = null - var appDiagnostic: String = null - - if (batchAppStatus.nonEmpty) { - appId = batchAppStatus.get.id - appUrl = batchAppStatus.get.url.orNull - appState = batchAppStatus.get.state.toString - appDiagnostic = batchAppStatus.get.error.orNull - } else { - val metadata = sessionManager.getBatchMetadata(batchOp.batchId) - appId = metadata.engineId - appUrl = metadata.engineUrl - appState = metadata.engineState - appDiagnostic = metadata.engineError.orNull + + val (name, appId, appUrl, appState, appDiagnostic) = batchOp.getApplicationInfo.map { appInfo => + val name = Option(batchOp.batchName).getOrElse(appInfo.name) + (name, appInfo.id, appInfo.url.orNull, appInfo.state.toString, appInfo.error.orNull) + }.getOrElse { + sessionManager.getBatchMetadata(batchOp.batchId) match { + case Some(batch) => + val diagnostic = batch.engineError.orNull + (batchOp.batchName, batch.engineId, batch.engineUrl, batch.engineState, diagnostic) + case None => + (batchOp.batchName, null, null, null, null) + } } new Batch( @@ -184,6 +178,9 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { @FormDataParam("batchRequest") batchRequest: BatchRequest, @FormDataParam("resourceFile") resourceFileInputStream: InputStream, @FormDataParam("resourceFile") resourceFileMetadata: FormDataContentDisposition): Batch = { + require( + fe.getConf.get(BATCH_RESOURCE_UPLOAD_ENABLED), + "Batch resource upload function is disabled.") require( batchRequest != null, "batchRequest is required and please check the content type" + @@ -225,7 +222,7 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { } userProvidedBatchId.flatMap { batchId => - Option(sessionManager.getBatchFromMetadataStore(batchId)) + sessionManager.getBatchFromMetadataStore(batchId) } match { case Some(batch) => markDuplicated(batch) @@ -242,20 +239,51 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { KYUUBI_SESSION_CONNECTION_URL_KEY -> fe.connectionUrl, KYUUBI_SESSION_REAL_USER_KEY -> fe.getRealUser())).asJava) + if (batchV2Enabled(request.getConf.asScala.toMap)) { + logger.info(s"Submit batch job $batchId using Batch API v2") + return Try { + sessionManager.initializeBatchState( + userName, + ipAddress, + request.getConf.asScala.toMap, + request) + } match { + case Success(batchId) => + sessionManager.getBatchFromMetadataStore(batchId) match { + case Some(batch) => batch + case None => throw new IllegalStateException( + s"can not find batch $batchId from metadata store") + } + case Failure(cause) if JdbcUtils.isDuplicatedKeyDBErr(cause) => + sessionManager.getBatchFromMetadataStore(batchId) match { + case Some(batch) => 
markDuplicated(batch) + case None => throw new IllegalStateException( + s"can not find duplicated batch $batchId from metadata store") + } + case Failure(cause) => throw new IllegalStateException(cause) + } + } + Try { sessionManager.openBatchSession( userName, "anonymous", ipAddress, - request.getConf.asScala.toMap, request) } match { case Success(sessionHandle) => - buildBatch(sessionManager.getBatchSessionImpl(sessionHandle)) + sessionManager.getBatchSession(sessionHandle) match { + case Some(batchSession) => buildBatch(batchSession) + case None => throw new IllegalStateException( + s"can not find batch $batchId from metadata store") + } case Failure(cause) if JdbcUtils.isDuplicatedKeyDBErr(cause) => - val batch = sessionManager.getBatchFromMetadataStore(batchId) - assert(batch != null, s"can not find duplicated batch $batchId from metadata store") - markDuplicated(batch) + sessionManager.getBatchFromMetadataStore(batchId) match { + case Some(batch) => markDuplicated(batch) + case None => throw new IllegalStateException( + s"can not find duplicated batch $batchId from metadata store") + } + case Failure(cause) => throw new IllegalStateException(cause) } } } @@ -277,11 +305,12 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { def batchInfo(@PathParam("batchId") batchId: String): Batch = { val userName = fe.getSessionUser(Map.empty[String, String]) val sessionHandle = formatSessionHandle(batchId) - Option(sessionManager.getBatchSessionImpl(sessionHandle)).map { batchSession => + sessionManager.getBatchSession(sessionHandle).map { batchSession => buildBatch(batchSession) }.getOrElse { - Option(sessionManager.getBatchMetadata(batchId)).map { metadata => - if (OperationState.isTerminal(OperationState.withName(metadata.state)) || + sessionManager.getBatchMetadata(batchId).map { metadata => + if (batchV2Enabled(metadata.requestConf) || + OperationState.isTerminal(OperationState.withName(metadata.state)) || metadata.kyuubiInstance == fe.connectionUrl) { MetadataManager.buildBatch(metadata) } else { @@ -292,8 +321,11 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { case e: KyuubiRestException => error(s"Error redirecting get batch[$batchId] to ${metadata.kyuubiInstance}", e) val batchAppStatus = sessionManager.applicationManager.getApplicationInfo( - metadata.clusterManager, - batchId) + metadata.appMgrInfo, + batchId, + Some(userName), + // prevent that the batch be marked as terminated if application state is NOT_FOUND + Some(metadata.engineOpenTime).filter(_ > 0).orElse(Some(System.currentTimeMillis))) buildBatch(metadata, batchAppStatus) } } @@ -316,6 +348,7 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { @QueryParam("batchType") batchType: String, @QueryParam("batchState") batchState: String, @QueryParam("batchUser") batchUser: String, + @QueryParam("batchName") batchName: String, @QueryParam("createTime") createTime: Long, @QueryParam("endTime") endTime: Long, @QueryParam("from") from: Int, @@ -328,15 +361,16 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { validBatchState(batchState), s"The valid batch state can be one of the following: ${VALID_BATCH_STATES.mkString(",")}") } - val batches = - sessionManager.getBatchesFromMetadataStore( - batchType, - batchUser, - batchState, - createTime, - endTime, - from, - size) + + val filter = MetadataFilter( + sessionType = SessionType.BATCH, + engineType = batchType, + username = batchUser, + state = batchState, + requestName = 
batchName, + createTime = createTime, + endTime = endTime) + val batches = sessionManager.getBatchesFromMetadataStore(filter, from, size) new GetBatchesResponse(from, batches.size, batches.asJava) } @@ -354,7 +388,7 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { @QueryParam("size") @DefaultValue("100") size: Int): OperationLog = { val userName = fe.getSessionUser(Map.empty[String, String]) val sessionHandle = formatSessionHandle(batchId) - Option(sessionManager.getBatchSessionImpl(sessionHandle)).map { batchSession => + sessionManager.getBatchSession(sessionHandle).map { batchSession => try { val submissionOp = batchSession.batchJobSubmissionOp val rowSet = submissionOp.getOperationLogRowSet(FetchOrientation.FETCH_NEXT, from, size) @@ -374,10 +408,21 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { throw new NotFoundException(errorMsg) } }.getOrElse { - Option(sessionManager.getBatchMetadata(batchId)).map { metadata => - if (fe.connectionUrl != metadata.kyuubiInstance) { + sessionManager.getBatchMetadata(batchId).map { metadata => + if (batchV2Enabled(metadata.requestConf) && metadata.state == "INITIALIZED") { + info(s"Batch $batchId is waiting for scheduling") + val dummyLogs = List(s"Batch $batchId is waiting for scheduling").asJava + new OperationLog(dummyLogs, dummyLogs.size) + } else if (fe.connectionUrl != metadata.kyuubiInstance) { val internalRestClient = getInternalRestClient(metadata.kyuubiInstance) internalRestClient.getBatchLocalLog(userName, batchId, from, size) + } else if (batchV2Enabled(metadata.requestConf) && + // in batch v2 impl, the operation state is changed from PENDING to RUNNING + // before being added to SessionManager. + (metadata.state == "PENDING" || metadata.state == "RUNNING")) { + info(s"Batch $batchId is waiting for submitting") + val dummyLogs = List(s"Batch $batchId is waiting for submitting").asJava + new OperationLog(dummyLogs, dummyLogs.size) } else { throw new NotFoundException(s"No local log found for batch: $batchId") } @@ -399,29 +444,50 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { def closeBatchSession( @PathParam("batchId") batchId: String, @QueryParam("hive.server2.proxy.user") hs2ProxyUser: String): CloseBatchResponse = { - val sessionHandle = formatSessionHandle(batchId) - val userName = fe.getSessionUser(hs2ProxyUser) - - Option(sessionManager.getBatchSessionImpl(sessionHandle)).map { batchSession => - if (userName != batchSession.user) { + def checkPermission(operator: String, owner: String): Unit = { + if (operator != owner) { throw new WebApplicationException( - s"$userName is not allowed to close the session belong to ${batchSession.user}", + s"$operator is not allowed to close the session belong to $owner", Status.METHOD_NOT_ALLOWED) } + } + + def forceKill( + appMgrInfo: ApplicationManagerInfo, + batchId: String, + user: String): KillResponse = { + val (killed, message) = sessionManager.applicationManager + .killApplication(appMgrInfo, batchId, Some(user)) + info(s"Mark batch[$batchId] closed by ${fe.connectionUrl}") + sessionManager.updateMetadata(Metadata(identifier = batchId, peerInstanceClosed = true)) + (killed, message) + } + + val sessionHandle = formatSessionHandle(batchId) + val userName = fe.getSessionUser(hs2ProxyUser) + + sessionManager.getBatchSession(sessionHandle).map { batchSession => + checkPermission(userName, batchSession.user) sessionManager.closeSession(batchSession.handle) - val (success, msg) = 
batchSession.batchJobSubmissionOp.getKillMessage - new CloseBatchResponse(success, msg) + val (killed, msg) = batchSession.batchJobSubmissionOp.getKillMessage + new CloseBatchResponse(killed, msg) }.getOrElse { - Option(sessionManager.getBatchMetadata(batchId)).map { metadata => - if (userName != metadata.username) { - throw new WebApplicationException( - s"$userName is not allowed to close the session belong to ${metadata.username}", - Status.METHOD_NOT_ALLOWED) - } else if (OperationState.isTerminal(OperationState.withName(metadata.state)) || - metadata.kyuubiInstance == fe.connectionUrl) { + sessionManager.getBatchMetadata(batchId).map { metadata => + checkPermission(userName, metadata.username) + if (OperationState.isTerminal(OperationState.withName(metadata.state))) { new CloseBatchResponse(false, s"The batch[$metadata] has been terminated.") - } else { + } else if (batchV2Enabled(metadata.requestConf) && metadata.state == "INITIALIZED") { + if (batchService.get.cancelUnscheduledBatch(batchId)) { + new CloseBatchResponse(true, s"Unscheduled batch $batchId is canceled.") + } else if (OperationState.isTerminal(OperationState.withName(metadata.state))) { + new CloseBatchResponse(false, s"The batch[$metadata] has been terminated.") + } else { + info(s"Cancel batch[$batchId] with state ${metadata.state} by killing application") + val (killed, msg) = forceKill(metadata.appMgrInfo, batchId, userName) + new CloseBatchResponse(killed, msg) + } + } else if (metadata.kyuubiInstance != fe.connectionUrl) { info(s"Redirecting delete batch[$batchId] to ${metadata.kyuubiInstance}") val internalRestClient = getInternalRestClient(metadata.kyuubiInstance) try { @@ -429,20 +495,13 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging { } catch { case e: KyuubiRestException => error(s"Error redirecting delete batch[$batchId] to ${metadata.kyuubiInstance}", e) - val appMgrKillResp = sessionManager.applicationManager.killApplication( - metadata.clusterManager, - batchId) - info( - s"Marking batch[$batchId/${metadata.kyuubiInstance}] closed by ${fe.connectionUrl}") - sessionManager.updateMetadata(Metadata( - identifier = batchId, - peerInstanceClosed = true)) - if (appMgrKillResp._1) { - new CloseBatchResponse(appMgrKillResp._1, appMgrKillResp._2) - } else { - new CloseBatchResponse(false, Utils.stringifyException(e)) - } + val (killed, msg) = forceKill(metadata.appMgrInfo, batchId, userName) + new CloseBatchResponse(killed, if (killed) msg else Utils.stringifyException(e)) } + } else { // should not happen, but handle this for safe + warn(s"Something wrong on deleting batch[$batchId], try forcibly killing application") + val (killed, msg) = forceKill(metadata.appMgrInfo, batchId, userName) + new CloseBatchResponse(killed, msg) } }.getOrElse { error(s"Invalid batchId: $batchId") diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/OperationsResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/OperationsResource.scala index 70a6d3a2848..fdde5bbc5b2 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/OperationsResource.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/OperationsResource.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.server.api.v1 -import javax.ws.rs._ +import javax.ws.rs.{BadRequestException, _} import javax.ws.rs.core.{MediaType, Response} import scala.collection.JavaConverters._ @@ -32,12 +32,13 @@ import org.apache.kyuubi.{KyuubiSQLException, Logging} import 
org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.events.KyuubiOperationEvent import org.apache.kyuubi.operation.{FetchOrientation, KyuubiOperation, OperationHandle} -import org.apache.kyuubi.server.api.ApiRequestContext +import org.apache.kyuubi.server.api.{ApiRequestContext, ApiUtils} @Tag(name = "Operation") @Produces(Array(MediaType.APPLICATION_JSON)) @Consumes(Array(MediaType.APPLICATION_JSON)) private[v1] class OperationsResource extends ApiRequestContext with Logging { + import ApiUtils.logAndRefineErrorMsg @ApiResponse( responseCode = "200", @@ -57,8 +58,7 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting an operation event" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -84,8 +84,7 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { case NonFatal(e) => val errorMsg = s"Error applying ${request.getAction} for operation handle $operationHandleStr" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -109,7 +108,7 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { var scale = 0 if (tPrimitiveTypeEntry.getTypeQualifiers != null) { val qualifiers = tPrimitiveTypeEntry.getTypeQualifiers.getQualifiers - val defaultValue = TTypeQualifierValue.i32Value(0); + val defaultValue = TTypeQualifierValue.i32Value(0) precision = qualifiers.getOrDefault("precision", defaultValue).getI32Value scale = qualifiers.getOrDefault("scale", defaultValue).getI32Value } @@ -124,8 +123,7 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = s"Error getting result set metadata for operation handle $operationHandleStr" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -140,19 +138,26 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { @Path("{operationHandle}/log") def getOperationLog( @PathParam("operationHandle") operationHandleStr: String, - @QueryParam("maxrows") maxRows: Int): OperationLog = { + @QueryParam("maxrows") @DefaultValue("100") maxRows: Int, + @QueryParam("fetchorientation") @DefaultValue("FETCH_NEXT") + fetchOrientation: String): OperationLog = { try { - val rowSet = fe.be.sessionManager.operationManager.getOperationLogRowSet( + if (fetchOrientation != "FETCH_NEXT" && fetchOrientation != "FETCH_FIRST") { + throw new BadRequestException(s"$fetchOrientation in operation log is not supported") + } + val fetchResultsResp = fe.be.sessionManager.operationManager.getOperationLogRowSet( OperationHandle(operationHandleStr), - FetchOrientation.FETCH_NEXT, + FetchOrientation.withName(fetchOrientation), maxRows) + val rowSet = fetchResultsResp.getResults val logRowSet = rowSet.getColumns.get(0).getStringVal.getValues.asScala new OperationLog(logRowSet.asJava, logRowSet.size) } catch { + case e: BadRequestException => + throw e case NonFatal(e) => val errorMsg = s"Error getting operation log for operation handle $operationHandleStr" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -171,11 +176,12 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { @QueryParam("fetchorientation") 
@DefaultValue("FETCH_NEXT") fetchOrientation: String): ResultRowSet = { try { - val rowSet = fe.be.fetchResults( + val fetchResultsResp = fe.be.fetchResults( OperationHandle(operationHandleStr), FetchOrientation.withName(fetchOrientation), maxRows, fetchLog = false) + val rowSet = fetchResultsResp.getResults val rows = rowSet.getRows.asScala.map(i => { new Row(i.getColVals.asScala.map(i => { new Field( @@ -233,8 +239,7 @@ private[v1] class OperationsResource extends ApiRequestContext with Logging { throw new BadRequestException(e.getMessage) case NonFatal(e) => val errorMsg = s"Error getting result row set for operation handle $operationHandleStr" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/SessionsResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/SessionsResource.scala index 84b19eb0038..10a55786798 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/SessionsResource.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/SessionsResource.scala @@ -27,22 +27,23 @@ import scala.util.control.NonFatal import io.swagger.v3.oas.annotations.media.{ArraySchema, Content, Schema} import io.swagger.v3.oas.annotations.responses.ApiResponse import io.swagger.v3.oas.annotations.tags.Tag +import org.apache.commons.lang3.StringUtils import org.apache.hive.service.rpc.thrift.{TGetInfoType, TProtocolVersion} import org.apache.kyuubi.Logging import org.apache.kyuubi.client.api.v1.dto import org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.config.KyuubiReservedKeys._ -import org.apache.kyuubi.events.KyuubiEvent -import org.apache.kyuubi.operation.OperationHandle -import org.apache.kyuubi.server.api.ApiRequestContext -import org.apache.kyuubi.session.KyuubiSession -import org.apache.kyuubi.session.SessionHandle +import org.apache.kyuubi.operation.{KyuubiOperation, OperationHandle} +import org.apache.kyuubi.server.api.{ApiRequestContext, ApiUtils} +import org.apache.kyuubi.session.{KyuubiSession, SessionHandle} @Tag(name = "Session") @Produces(Array(MediaType.APPLICATION_JSON)) @Consumes(Array(MediaType.APPLICATION_JSON)) private[v1] class SessionsResource extends ApiRequestContext with Logging { + import ApiUtils.logAndRefineErrorMsg + implicit def toSessionHandle(str: String): SessionHandle = SessionHandle.fromUUID(str) private def sessionManager = fe.be.sessionManager @@ -54,34 +55,44 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { description = "get the list of all live sessions") @GET def sessions(): Seq[SessionData] = { - sessionManager.allSessions().map { session => - new SessionData( - session.handle.identifier.toString, - session.user, - session.ipAddress, - session.conf.asJava, - session.createTime, - session.lastAccessTime - session.createTime, - session.getNoOperationTime) - }.toSeq + sessionManager.allSessions() + .map(session => ApiUtils.sessionData(session.asInstanceOf[KyuubiSession])).toSeq } @ApiResponse( responseCode = "200", content = Array(new Content( mediaType = MediaType.APPLICATION_JSON, - schema = new Schema(implementation = classOf[KyuubiEvent]))), + schema = new Schema(implementation = classOf[dto.KyuubiSessionEvent]))), description = "get a session event via session handle identifier") @GET @Path("{sessionHandle}") - def sessionInfo(@PathParam("sessionHandle") sessionHandleStr: String): KyuubiEvent = { + def 
sessionInfo(@PathParam("sessionHandle") sessionHandleStr: String): dto.KyuubiSessionEvent = { try { sessionManager.getSession(sessionHandleStr) - .asInstanceOf[KyuubiSession].getSessionEvent.get + .asInstanceOf[KyuubiSession].getSessionEvent.map(event => + dto.KyuubiSessionEvent.builder + .sessionId(event.sessionId) + .clientVersion(event.clientVersion) + .sessionType(event.sessionType) + .sessionName(event.sessionName) + .user(event.user) + .clientIp(event.clientIP) + .serverIp(event.serverIP) + .conf(event.conf.asJava) + .remoteSessionId(event.remoteSessionId) + .engineId(event.engineId) + .eventTime(event.eventTime) + .openedTime(event.openedTime) + .startTime(event.startTime) + .endTime(event.endTime) + .totalOperations(event.totalOperations) + .exception(event.exception.orNull) + .build).get } catch { case NonFatal(e) => - error(s"Invalid $sessionHandleStr", e) - throw new NotFoundException(s"Invalid $sessionHandleStr") + val errorMsg = s"Invalid $sessionHandleStr" + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -103,8 +114,8 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { new InfoDetail(info.toString, infoValue.getStringValue) } catch { case NonFatal(e) => - error(s"Unrecognized GetInfoType value: $infoType", e) - throw new NotFoundException(s"Unrecognized GetInfoType value: $infoType") + val errorMsg = s"Unrecognized GetInfoType value: $infoType" + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -144,7 +155,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { val userName = fe.getSessionUser(request.getConfigs.asScala.toMap) val ipAddress = fe.getIpAddress val handle = fe.be.openSession( - TProtocolVersion.findByValue(request.getProtocolVersion), + SessionsResource.SESSION_PROTOCOL_VERSION, userName, "", ipAddress, @@ -163,6 +174,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { @DELETE @Path("{sessionHandle}") def closeSession(@PathParam("sessionHandle") sessionHandleStr: String): Response = { + info(s"Received request of closing $sessionHandleStr") fe.be.closeSession(sessionHandleStr) Response.ok().build() } @@ -182,14 +194,13 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { fe.be.executeStatement( sessionHandleStr, request.getStatement, - Map.empty, + request.getConfOverlay.asScala.toMap, request.isRunAsync, request.getQueryTimeout) } catch { case NonFatal(e) => val errorMsg = "Error executing statement" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -207,8 +218,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting type information" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -226,8 +236,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting catalogs" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -251,8 +260,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting schemas" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ 
-277,8 +285,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting tables" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -296,8 +303,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting table types" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -322,8 +328,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting columns" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -347,8 +352,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting functions" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -372,8 +376,7 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting primary keys" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } @@ -400,8 +403,37 @@ private[v1] class SessionsResource extends ApiRequestContext with Logging { } catch { case NonFatal(e) => val errorMsg = "Error getting cross reference" - error(errorMsg, e) - throw new NotFoundException(errorMsg) + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) } } + + @ApiResponse( + responseCode = "200", + content = Array(new Content( + mediaType = MediaType.APPLICATION_JSON, + array = new ArraySchema(schema = new Schema(implementation = + classOf[OperationData])))), + description = + "get the list of all type operations belong to session") + @GET + @Path("{sessionHandle}/operations") + def getOperation(@PathParam("sessionHandle") sessionHandleStr: String): Seq[OperationData] = { + try { + fe.be.sessionManager.operationManager.allOperations().map { operation => + if (StringUtils.equalsIgnoreCase( + operation.getSession.handle.identifier.toString, + sessionHandleStr)) { + ApiUtils.operationData(operation.asInstanceOf[KyuubiOperation]) + } + }.toSeq.asInstanceOf[Seq[OperationData]] + } catch { + case NonFatal(e) => + val errorMsg = "Error getting the list of all type operations belong to session" + throw new NotFoundException(logAndRefineErrorMsg(errorMsg, e)) + } + } +} + +object SessionsResource { + final val SESSION_PROTOCOL_VERSION = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1 } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilter.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilter.scala index 3c4065a7bdc..523d2490753 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilter.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilter.scala @@ -22,7 +22,7 @@ import javax.security.sasl.AuthenticationException import javax.servlet.{Filter, FilterChain, FilterConfig, ServletException, ServletRequest, ServletResponse} import javax.servlet.http.{HttpServletRequest, HttpServletResponse} -import 
scala.collection.mutable.HashMap +import scala.collection.mutable import org.apache.kyuubi.Logging import org.apache.kyuubi.config.KyuubiConf @@ -35,7 +35,8 @@ class AuthenticationFilter(conf: KyuubiConf) extends Filter with Logging { import AuthenticationHandler._ import AuthSchemes._ - private[authentication] val authSchemeHandlers = new HashMap[AuthScheme, AuthenticationHandler]() + private[authentication] val authSchemeHandlers = + new mutable.HashMap[AuthScheme, AuthenticationHandler]() private[authentication] def addAuthHandler(authHandler: AuthenticationHandler): Unit = { authHandler.init(conf) @@ -57,7 +58,7 @@ class AuthenticationFilter(conf: KyuubiConf) extends Filter with Logging { val authTypes = conf.get(AUTHENTICATION_METHOD).map(AuthTypes.withName) val spnegoKerberosEnabled = authTypes.contains(KERBEROS) val basicAuthTypeOpt = { - if (authTypes == Seq(NOSASL)) { + if (authTypes == Set(NOSASL)) { authTypes.headOption } else { authTypes.filterNot(_.equals(KERBEROS)).filterNot(_.equals(NOSASL)).headOption @@ -88,7 +89,7 @@ class AuthenticationFilter(conf: KyuubiConf) extends Filter with Logging { /** * If the request has a valid authentication token it allows the request to continue to the * target resource, otherwise it triggers an authentication sequence using the configured - * {@link AuthenticationHandler}. + * [[AuthenticationHandler]]. * * @param request the request object. * @param response the response object. @@ -109,32 +110,31 @@ class AuthenticationFilter(conf: KyuubiConf) extends Filter with Logging { HTTP_PROXY_HEADER_CLIENT_IP_ADDRESS.set( httpRequest.getHeader(conf.get(FRONTEND_PROXY_HTTP_CLIENT_IP_HEADER))) - if (matchedHandler == null) { - debug(s"No auth scheme matched for url: ${httpRequest.getRequestURL}") - httpResponse.setStatus(HttpServletResponse.SC_UNAUTHORIZED) - AuthenticationAuditLogger.audit(httpRequest, httpResponse) - httpResponse.sendError( - HttpServletResponse.SC_UNAUTHORIZED, - s"No auth scheme matched for $authorization") - } else { - HTTP_AUTH_TYPE.set(matchedHandler.authScheme.toString) - try { + try { + if (matchedHandler == null) { + debug(s"No auth scheme matched for url: ${httpRequest.getRequestURL}") + httpResponse.setStatus(HttpServletResponse.SC_UNAUTHORIZED) + httpResponse.sendError( + HttpServletResponse.SC_UNAUTHORIZED, + s"No auth scheme matched for $authorization") + } else { + HTTP_AUTH_TYPE.set(matchedHandler.authScheme.toString) val authUser = matchedHandler.authenticate(httpRequest, httpResponse) if (authUser != null) { HTTP_CLIENT_USER_NAME.set(authUser) doFilter(filterChain, httpRequest, httpResponse) } - AuthenticationAuditLogger.audit(httpRequest, httpResponse) - } catch { - case e: AuthenticationException => - httpResponse.setStatus(HttpServletResponse.SC_FORBIDDEN) - AuthenticationAuditLogger.audit(httpRequest, httpResponse) - HTTP_CLIENT_USER_NAME.remove() - HTTP_CLIENT_IP_ADDRESS.remove() - HTTP_PROXY_HEADER_CLIENT_IP_ADDRESS.remove() - HTTP_AUTH_TYPE.remove() - httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, e.getMessage) } + } catch { + case e: AuthenticationException => + httpResponse.setStatus(HttpServletResponse.SC_FORBIDDEN) + HTTP_CLIENT_USER_NAME.remove() + HTTP_CLIENT_IP_ADDRESS.remove() + HTTP_PROXY_HEADER_CLIENT_IP_ADDRESS.remove() + HTTP_AUTH_TYPE.remove() + httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, e.getMessage) + } finally { + AuthenticationAuditLogger.audit(httpRequest, httpResponse) } } @@ -158,7 +158,7 @@ class AuthenticationFilter(conf: KyuubiConf) extends Filter with Logging 
{ } override def destroy(): Unit = { - if (!authSchemeHandlers.isEmpty) { + if (authSchemeHandlers.nonEmpty) { authSchemeHandlers.values.foreach(_.destroy()) authSchemeHandlers.clear() } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationHandler.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationHandler.scala index acbc52f3531..bf2cb5bbecb 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationHandler.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/AuthenticationHandler.scala @@ -46,14 +46,14 @@ trait AuthenticationHandler { /** * Destroys the authentication handler instance. *
   * <p>
-   * This method is invoked by the {@link AuthenticationFilter# destroy} method.
+   * This method is invoked by the [[AuthenticationFilter.destroy]] method.
    */
   def destroy(): Unit
 
   /**
    * Performs an authentication step for the given HTTP client request.
    * <p>
-   * This method is invoked by the {@link AuthenticationFilter} only if the HTTP client request is
+   * This method is invoked by the [[AuthenticationFilter]] only if the HTTP client request is
    * not yet authenticated.
    * <p>
    * Depending upon the authentication mechanism being implemented, a particular HTTP client may diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosAuthenticationHandler.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosAuthenticationHandler.scala index 19a31feb6f3..04603f30a41 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosAuthenticationHandler.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosAuthenticationHandler.scala @@ -46,7 +46,7 @@ class KerberosAuthenticationHandler extends AuthenticationHandler with Logging { override val authScheme: AuthScheme = AuthSchemes.NEGOTIATE override def authenticationSupported: Boolean = { - !keytab.isEmpty && !principal.isEmpty + keytab.nonEmpty && principal.nonEmpty } override def init(conf: KyuubiConf): Unit = { @@ -141,7 +141,7 @@ class KerberosAuthenticationHandler extends AuthenticationHandler with Logging { GSSCredential.ACCEPT_ONLY) gssContext = gssManager.createContext(gssCreds) val serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length) - if (serverToken != null && serverToken.length > 0) { + if (serverToken != null && serverToken.nonEmpty) { val authenticate = Base64.getEncoder.encodeToString(serverToken) response.setHeader(WWW_AUTHENTICATE, s"$NEGOTIATE $authenticate") } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosUtil.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosUtil.scala index 8ff079373ed..a5b95678c23 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosUtil.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/http/authentication/KerberosUtil.scala @@ -201,7 +201,7 @@ object KerberosUtil { val names = ticket.get(0xA2, 0x30, 0xA1, 0x30) val sb = new StringBuilder while (names.hasNext) { - if (sb.length > 0) { + if (sb.nonEmpty) { sb.append('/') } sb.append(names.next.getAsString) diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataManager.scala index 88a7f4e4ebd..1da9e1f3148 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataManager.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataManager.scala @@ -124,32 +124,39 @@ class MetadataManager extends AbstractService("MetadataManager") { } } - def getBatch(batchId: String): Batch = { - Option(getBatchSessionMetadata(batchId)).map(buildBatch).orNull + def getBatch(batchId: String): Option[Batch] = { + getBatchSessionMetadata(batchId).map(buildBatch) } - def getBatchSessionMetadata(batchId: String): Metadata = { - Option(withMetadataRequestMetrics(_metadataStore.getMetadata(batchId, true))).filter( - _.sessionType == SessionType.BATCH).orNull + def getBatchSessionMetadata(batchId: String): Option[Metadata] = { + Option(withMetadataRequestMetrics(_metadataStore.getMetadata(batchId))) + .filter(_.sessionType == SessionType.BATCH) } - def getBatches( + def getBatches(filter: MetadataFilter, from: Int, size: Int): Seq[Batch] = { + withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size)).map( + buildBatch) + } + + def countBatch( batchType: String, batchUser: String, batchState: String, - createTime: Long, - endTime: Long, - from: Int, - size: Int): 
Seq[Batch] = { + kyuubiInstance: String): Int = { val filter = MetadataFilter( sessionType = SessionType.BATCH, engineType = batchType, username = batchUser, state = batchState, - createTime = createTime, - endTime = endTime) - withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size, true)).map( - buildBatch) + kyuubiInstance = kyuubiInstance) + withMetadataRequestMetrics(_metadataStore.countMetadata(filter)) + } + + def pickBatchForSubmitting(kyuubiInstance: String): Option[Metadata] = + withMetadataRequestMetrics(_metadataStore.pickMetadata(kyuubiInstance)) + + def cancelUnscheduledBatch(batchId: String): Boolean = { + _metadataStore.transformMetadataState(batchId, "INITIALIZED", "CANCELED") } def getBatchesRecoveryMetadata( @@ -161,7 +168,7 @@ class MetadataManager extends AbstractService("MetadataManager") { sessionType = SessionType.BATCH, state = state, kyuubiInstance = kyuubiInstance) - withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size, false)) + withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size)) } def getPeerInstanceClosedBatchesMetadata( @@ -174,7 +181,7 @@ class MetadataManager extends AbstractService("MetadataManager") { state = state, kyuubiInstance = kyuubiInstance, peerInstanceClosed = true) - withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size, true)) + withMetadataRequestMetrics(_metadataStore.getMetadataList(filter, from, size)) } def updateMetadata(metadata: Metadata, asyncRetryOnError: Boolean = true): Unit = { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataStore.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataStore.scala index 4416c4a6dce..d8258816a45 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataStore.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/MetadataStore.scala @@ -28,27 +28,45 @@ trait MetadataStore extends Closeable { */ def insertMetadata(metadata: Metadata): Unit + /** + * Find unscheduled batch job metadata and pick up it to submit. + * @param kyuubiInstance the Kyuubi instance picked batch job + * @return selected metadata for submitting or None if no sufficient items + */ + def pickMetadata(kyuubiInstance: String): Option[Metadata] + + /** + * Transfer state of metadata from the existing state to another + * @param identifier the identifier. + * @param fromState the desired current state + * @param targetState the desired target state + * @return `true` if the metadata state was same as `fromState`, and successfully + * transitioned to `targetState`, otherwise `false` is returned + */ + def transformMetadataState(identifier: String, fromState: String, targetState: String): Boolean + /** * Get the persisted metadata by batch identifier. * @param identifier the identifier. - * @param stateOnly only return the state related column values. * @return selected metadata. */ - def getMetadata(identifier: String, stateOnly: Boolean): Metadata + def getMetadata(identifier: String): Metadata /** * Get the metadata list with filter conditions, offset and size. * @param filter the metadata filter conditions. * @param from the metadata offset. * @param size the size to get. - * @param stateOnly only return the state related column values. * @return selected metadata list. 
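A minimal sketch of the contract documented for `transformMetadataState` above: the whole check-and-move is a single conditional UPDATE, so only one caller can win a given transition. It uses plain JDBC against a simplified, hypothetical `metadata` table (just `identifier` and `state` columns) and is not the PR's actual implementation.

import java.sql.Connection

object CasStateSketch {
  // Returns true only if the row was still in `fromState` and has now moved to
  // `targetState`; any concurrent transition makes the UPDATE match zero rows.
  def casState(
      conn: Connection,
      identifier: String,
      fromState: String,
      targetState: String): Boolean = {
    val stmt = conn.prepareStatement(
      "UPDATE metadata SET state = ? WHERE identifier = ? AND state = ?")
    try {
      stmt.setString(1, targetState) // SET state = targetState ...
      stmt.setString(2, identifier)
      stmt.setString(3, fromState) // ... but only while the current state is fromState
      stmt.executeUpdate() == 1
    } finally {
      stmt.close()
    }
  }
}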
*/ - def getMetadataList( - filter: MetadataFilter, - from: Int, - size: Int, - stateOnly: Boolean): Seq[Metadata] + def getMetadataList(filter: MetadataFilter, from: Int, size: Int): Seq[Metadata] + + /** + * Count the metadata list with filter conditions. + * @param filter the metadata filter conditions. + * @return the count of metadata satisfied the filter condition. + */ + def countMetadata(filter: MetadataFilter): Int /** * Update the metadata according to identifier. diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/Metadata.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/Metadata.scala index 949e88abdf1..3e3d9482841 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/Metadata.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/Metadata.scala @@ -17,6 +17,11 @@ package org.apache.kyuubi.server.metadata.api +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.engine.{ApplicationManagerInfo, ApplicationState} +import org.apache.kyuubi.engine.ApplicationState.ApplicationState +import org.apache.kyuubi.operation.OperationState +import org.apache.kyuubi.operation.OperationState.OperationState import org.apache.kyuubi.session.SessionType.SessionType /** @@ -73,4 +78,18 @@ case class Metadata( engineState: String = null, engineError: Option[String] = None, endTime: Long = 0L, - peerInstanceClosed: Boolean = false) + peerInstanceClosed: Boolean = false) { + def appMgrInfo: ApplicationManagerInfo = { + ApplicationManagerInfo( + clusterManager, + requestConf.get(KyuubiConf.KUBERNETES_CONTEXT.key), + requestConf.get(KyuubiConf.KUBERNETES_NAMESPACE.key)) + } + + def opState: OperationState = { + assert(state != null, "invalid state, a normal batch record must have non-null state") + OperationState.withName(state) + } + + def appState: Option[ApplicationState] = Option(engineState).map(ApplicationState.withName) +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/MetadataFilter.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/MetadataFilter.scala index 6213f8e6433..d4f7f2b63d1 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/MetadataFilter.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/api/MetadataFilter.scala @@ -27,6 +27,7 @@ case class MetadataFilter( engineType: String = null, username: String = null, state: String = null, + requestName: String = null, kyuubiInstance: String = null, createTime: Long = 0L, endTime: Long = 0L, diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/DatabaseType.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/DatabaseType.scala index ef93f31c55f..67d6686d17e 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/DatabaseType.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/DatabaseType.scala @@ -20,5 +20,5 @@ package org.apache.kyuubi.server.metadata.jdbc object DatabaseType extends Enumeration { type DatabaseType = Value - val DERBY, MYSQL, CUSTOM = Value + val DERBY, MYSQL, CUSTOM, SQLITE = Value } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStore.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStore.scala index f6caa9c1a46..dcb9c0f6685 100644 --- 
a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStore.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStore.scala @@ -39,6 +39,7 @@ import org.apache.kyuubi.server.metadata.api.{Metadata, MetadataFilter} import org.apache.kyuubi.server.metadata.jdbc.DatabaseType._ import org.apache.kyuubi.server.metadata.jdbc.JDBCMetadataStoreConf._ import org.apache.kyuubi.session.SessionType +import org.apache.kyuubi.util.JdbcUtils class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { import JDBCMetadataStore._ @@ -46,15 +47,17 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { private val dbType = DatabaseType.withName(conf.get(METADATA_STORE_JDBC_DATABASE_TYPE)) private val driverClassOpt = conf.get(METADATA_STORE_JDBC_DRIVER) private val driverClass = dbType match { + case SQLITE => driverClassOpt.getOrElse("org.sqlite.JDBC") case DERBY => driverClassOpt.getOrElse("org.apache.derby.jdbc.AutoloadedDriver") case MYSQL => driverClassOpt.getOrElse("com.mysql.jdbc.Driver") case CUSTOM => driverClassOpt.getOrElse( throw new IllegalArgumentException("No jdbc driver defined")) } - private val databaseAdaptor = dbType match { + private val dialect = dbType match { case DERBY => new DerbyDatabaseDialect - case MYSQL => new MysqlDatabaseDialect + case SQLITE => new SQLiteDatabaseDialect + case MYSQL => new MySQLDatabaseDialect case CUSTOM => new GenericDatabaseDialect } @@ -68,11 +71,10 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { hikariConfig.setPoolName("jdbc-metadata-store-pool") @VisibleForTesting - private[kyuubi] val hikariDataSource = new HikariDataSource(hikariConfig) + implicit private[kyuubi] val hikariDataSource = new HikariDataSource(hikariConfig) private val mapper = new ObjectMapper().registerModule(DefaultScalaModule) - private val terminalStates = - OperationState.terminalStates.map(x => s"'${x.toString}'").mkString(", ") + private val terminalStates = OperationState.terminalStates.map(x => s"'$x'").mkString(", ") if (conf.get(METADATA_STORE_JDBC_DATABASE_SCHEMA_INIT)) { initSchema() @@ -80,12 +82,14 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { private def initSchema(): Unit = { getInitSchema(dbType).foreach { schema => - val ddlStatements = schema.trim.split(";") - withConnection() { connection => + val ddlStatements = schema.trim.split(";").map(_.trim) + JdbcUtils.withConnection { connection => Utils.tryLogNonFatalError { ddlStatements.foreach { ddlStatement => execute(connection, ddlStatement) - info(s"Execute init schema ddl: $ddlStatement successfully.") + info(s"""Execute init schema ddl successfully. + |$ddlStatement + |""".stripMargin) } } } @@ -122,7 +126,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { inputStream.close() } } - }.headOption + } } def getSchemaVersion(schemaUrl: String): (Int, Int, Int) = @@ -168,7 +172,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { |VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
|""".stripMargin - withConnection() { connection => + JdbcUtils.withConnection { connection => execute( connection, query, @@ -190,77 +194,125 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { } } - override def getMetadata(identifier: String, stateOnly: Boolean): Metadata = { - val query = - if (stateOnly) { - s"SELECT $METADATA_STATE_ONLY_COLUMNS FROM $METADATA_TABLE WHERE identifier = ?" - } else { - s"SELECT $METADATA_ALL_COLUMNS FROM $METADATA_TABLE WHERE identifier = ?" + override def pickMetadata(kyuubiInstance: String): Option[Metadata] = synchronized { + JdbcUtils.executeQueryWithRowMapper( + s"""SELECT identifier FROM $METADATA_TABLE + |WHERE state=? + |ORDER BY create_time ASC LIMIT 1 + |""".stripMargin) { stmt => + stmt.setString(1, OperationState.INITIALIZED.toString) + } { resultSet => + resultSet.getString(1) + }.headOption.filter { preSelectedBatchId => + JdbcUtils.executeUpdate( + s"""UPDATE $METADATA_TABLE + |SET kyuubi_instance=?, state=? + |WHERE identifier=? AND state=? + |""".stripMargin) { stmt => + stmt.setString(1, kyuubiInstance) + stmt.setString(2, OperationState.PENDING.toString) + stmt.setString(3, preSelectedBatchId) + stmt.setString(4, OperationState.INITIALIZED.toString) + } == 1 + }.map { pickedBatchId => + getMetadata(pickedBatchId) + } + } + + override def transformMetadataState( + identifier: String, + fromState: String, + targetState: String): Boolean = { + val query = s"UPDATE $METADATA_TABLE SET state = ? WHERE identifier = ? AND state = ?" + JdbcUtils.withConnection { connection => + withUpdateCount(connection, query, fromState, identifier, targetState) { updateCount => + updateCount == 1 } + } + } - withConnection() { connection => + override def getMetadata(identifier: String): Metadata = { + val query = s"SELECT $METADATA_COLUMNS FROM $METADATA_TABLE WHERE identifier = ?" 
+ + JdbcUtils.withConnection { connection => withResultSet(connection, query, identifier) { rs => - buildMetadata(rs, stateOnly).headOption.orNull + buildMetadata(rs).headOption.orNull } } } - override def getMetadataList( - filter: MetadataFilter, - from: Int, - size: Int, - stateOnly: Boolean): Seq[Metadata] = { + override def getMetadataList(filter: MetadataFilter, from: Int, size: Int): Seq[Metadata] = { val queryBuilder = new StringBuilder val params = ListBuffer[Any]() - if (stateOnly) { - queryBuilder.append(s"SELECT $METADATA_STATE_ONLY_COLUMNS FROM $METADATA_TABLE") - } else { - queryBuilder.append(s"SELECT $METADATA_ALL_COLUMNS FROM $METADATA_TABLE") + queryBuilder.append("SELECT ") + queryBuilder.append(METADATA_COLUMNS) + queryBuilder.append(s" FROM $METADATA_TABLE") + queryBuilder.append(s" ${assembleWhereClause(filter, params)}") + queryBuilder.append(" ORDER BY key_id ") + queryBuilder.append(dialect.limitClause(size, from)) + val query = queryBuilder.toString + JdbcUtils.withConnection { connection => + withResultSet(connection, query, params.toSeq: _*) { rs => + buildMetadata(rs) + } } - val whereConditions = ListBuffer[String]() + } + + override def countMetadata(filter: MetadataFilter): Int = { + val queryBuilder = new StringBuilder + val params = ListBuffer[Any]() + queryBuilder.append(s"SELECT COUNT(1) FROM $METADATA_TABLE") + queryBuilder.append(s" ${assembleWhereClause(filter, params)}") + val query = queryBuilder.toString + JdbcUtils.executeQueryWithRowMapper(query) { stmt => + setStatementParams(stmt, params) + } { resultSet => + resultSet.getInt(1) + }.head + } + + private def assembleWhereClause( + filter: MetadataFilter, + params: ListBuffer[Any]): String = { + val whereConditions = ListBuffer[String]("1 = 1") Option(filter.sessionType).foreach { sessionType => - whereConditions += " session_type = ?" + whereConditions += "session_type = ?" params += sessionType.toString } Option(filter.engineType).filter(_.nonEmpty).foreach { engineType => - whereConditions += " UPPER(engine_type) = ? " + whereConditions += "UPPER(engine_type) = ?" params += engineType.toUpperCase(Locale.ROOT) } Option(filter.username).filter(_.nonEmpty).foreach { username => - whereConditions += " user_name = ? " + whereConditions += "user_name = ?" params += username } Option(filter.state).filter(_.nonEmpty).foreach { state => - whereConditions += " state = ? " + whereConditions += "state = ?" params += state.toUpperCase(Locale.ROOT) } + Option(filter.requestName).filter(_.nonEmpty).foreach { requestName => + whereConditions += "request_name = ?" + params += requestName + } Option(filter.kyuubiInstance).filter(_.nonEmpty).foreach { kyuubiInstance => - whereConditions += " kyuubi_instance = ? " + whereConditions += "kyuubi_instance = ?" params += kyuubiInstance } if (filter.createTime > 0) { - whereConditions += " create_time >= ? " + whereConditions += "create_time >= ?" params += filter.createTime } if (filter.endTime > 0) { - whereConditions += " end_time > 0 " - whereConditions += " end_time <= ? " + whereConditions += "end_time > 0" + whereConditions += "end_time <= ?" params += filter.endTime } if (filter.peerInstanceClosed) { - whereConditions += " peer_instance_closed = ? " + whereConditions += "peer_instance_closed = ?" 
params += filter.peerInstanceClosed } - if (whereConditions.nonEmpty) { - queryBuilder.append(whereConditions.mkString(" WHERE ", " AND ", " ")) - } - queryBuilder.append(" ORDER BY key_id ") - val query = databaseAdaptor.addLimitAndOffsetToQuery(queryBuilder.toString(), size, from) - withConnection() { connection => - withResultSet(connection, query, params: _*) { rs => - buildMetadata(rs, stateOnly) - } - } + whereConditions.mkString("WHERE ", " AND ", "") } override def updateMetadata(metadata: Metadata): Unit = { @@ -269,54 +321,67 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { queryBuilder.append(s"UPDATE $METADATA_TABLE") val setClauses = ListBuffer[String]() + Option(metadata.kyuubiInstance).foreach { _ => + setClauses += "kyuubi_instance = ?" + params += metadata.kyuubiInstance + } Option(metadata.state).foreach { _ => - setClauses += " state = ? " + setClauses += "state = ?" params += metadata.state } + Option(metadata.requestConf).filter(_.nonEmpty).foreach { _ => + setClauses += "request_conf =?" + params += valueAsString(metadata.requestConf) + } + metadata.clusterManager.foreach { cm => + setClauses += "cluster_manager = ?" + params += cm + } if (metadata.endTime > 0) { - setClauses += " end_time = ? " + setClauses += "end_time = ?" params += metadata.endTime } if (metadata.engineOpenTime > 0) { - setClauses += " engine_open_time = ? " + setClauses += "engine_open_time = ?" params += metadata.engineOpenTime } Option(metadata.engineId).foreach { _ => - setClauses += " engine_id = ? " + setClauses += "engine_id = ?" params += metadata.engineId } Option(metadata.engineName).foreach { _ => - setClauses += " engine_name = ? " + setClauses += "engine_name = ?" params += metadata.engineName } Option(metadata.engineUrl).foreach { _ => - setClauses += " engine_url = ? " + setClauses += "engine_url = ?" params += metadata.engineUrl } Option(metadata.engineState).foreach { _ => - setClauses += " engine_state = ? " + setClauses += "engine_state = ?" params += metadata.engineState } metadata.engineError.foreach { error => - setClauses += " engine_error = ? " + setClauses += "engine_error = ?" params += error } if (metadata.peerInstanceClosed) { - setClauses += " peer_instance_closed = ? " + setClauses += "peer_instance_closed = ?" params += metadata.peerInstanceClosed } if (setClauses.nonEmpty) { - queryBuilder.append(setClauses.mkString(" SET ", " , ", " ")) + queryBuilder.append(setClauses.mkString(" SET ", ", ", "")) } - queryBuilder.append(" WHERE identifier = ? ") + queryBuilder.append(" WHERE identifier = ?") params += metadata.identifier val query = queryBuilder.toString() - withConnection() { connection => - withUpdateCount(connection, query, params: _*) { updateCount => + JdbcUtils.withConnection { connection => + withUpdateCount(connection, query, params.toSeq: _*) { updateCount => if (updateCount == 0) { throw new KyuubiException( - s"Error updating metadata for ${metadata.identifier} with $query") + s"Error updating metadata for ${metadata.identifier} by SQL: $query, " + + s"with params: ${params.mkString(", ")}") } } } @@ -324,7 +389,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { override def cleanupMetadataByIdentifier(identifier: String): Unit = { val query = s"DELETE FROM $METADATA_TABLE WHERE identifier = ?" 
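Because `assembleWhereClause` above builds the clause and its positional parameters in lockstep, the same assembly can back both the paged `getMetadataList` and `countMetadata`. A self-contained sketch of that pattern with a hypothetical, trimmed-down filter type (not Kyuubi's `MetadataFilter`):

import scala.collection.mutable.ListBuffer

object WhereClauseSketch {
  // Hypothetical, trimmed-down filter; the real MetadataFilter carries more fields.
  case class BatchFilter(
      username: String = null,
      state: String = null,
      createTimeAfter: Long = 0L)

  // Build "WHERE ..." and the matching positional parameters together, so the
  // clause can be appended to either a SELECT ... LIMIT/OFFSET or a SELECT COUNT(1).
  def assembleWhere(filter: BatchFilter, params: ListBuffer[Any]): String = {
    val conditions = ListBuffer[String]("1 = 1") // keeps the AND chaining uniform
    Option(filter.username).filter(_.nonEmpty).foreach { u =>
      conditions += "user_name = ?"; params += u
    }
    Option(filter.state).filter(_.nonEmpty).foreach { s =>
      conditions += "state = ?"; params += s
    }
    if (filter.createTimeAfter > 0) {
      conditions += "create_time >= ?"; params += filter.createTimeAfter
    }
    conditions.mkString("WHERE ", " AND ", "")
  }

  def main(args: Array[String]): Unit = {
    val params = ListBuffer[Any]()
    val where = assembleWhere(BatchFilter(username = "alice", state = "RUNNING"), params)
    println(s"SELECT COUNT(1) FROM metadata $where with params $params")
    // SELECT COUNT(1) FROM metadata WHERE 1 = 1 AND user_name = ? AND state = ? with params ListBuffer(alice, RUNNING)
  }
}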
- withConnection() { connection => + JdbcUtils.withConnection { connection => execute(connection, query, identifier) } } @@ -332,12 +397,12 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { override def cleanupMetadataByAge(maxAge: Long): Unit = { val minEndTime = System.currentTimeMillis() - maxAge val query = s"DELETE FROM $METADATA_TABLE WHERE state IN ($terminalStates) AND end_time < ?" - withConnection() { connection => + JdbcUtils.withConnection { connection => execute(connection, query, minEndTime) } } - private def buildMetadata(resultSet: ResultSet, stateOnly: Boolean): Seq[Metadata] = { + private def buildMetadata(resultSet: ResultSet): Seq[Metadata] = { try { val metadataList = ListBuffer[Metadata]() while (resultSet.next()) { @@ -348,7 +413,11 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { val ipAddress = resultSet.getString("ip_address") val kyuubiInstance = resultSet.getString("kyuubi_instance") val state = resultSet.getString("state") + val resource = resultSet.getString("resource") + val className = resultSet.getString("class_name") val requestName = resultSet.getString("request_name") + val requestConf = string2Map(resultSet.getString("request_conf")) + val requestArgs = string2Seq(resultSet.getString("request_args")) val createTime = resultSet.getLong("create_time") val engineType = resultSet.getString("engine_type") val clusterManager = Option(resultSet.getString("cluster_manager")) @@ -360,17 +429,6 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { val endTime = resultSet.getLong("end_time") val peerInstanceClosed = resultSet.getBoolean("peer_instance_closed") - var resource: String = null - var className: String = null - var requestConf: Map[String, String] = Map.empty - var requestArgs: Seq[String] = Seq.empty - - if (!stateOnly) { - resource = resultSet.getString("resource") - className = resultSet.getString("class_name") - requestConf = string2Map(resultSet.getString("request_conf")) - requestArgs = string2Seq(resultSet.getString("request_args")) - } val metadata = Metadata( identifier = identifier, sessionType = sessionType, @@ -396,14 +454,14 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { peerInstanceClosed = peerInstanceClosed) metadataList += metadata } - metadataList + metadataList.toSeq } finally { Utils.tryLogNonFatalError(resultSet.close()) } } private def execute(conn: Connection, sql: String, params: Any*): Unit = { - debug(s"executing sql $sql") + debug(s"execute sql: $sql, with params: ${params.mkString(", ")}") var statement: PreparedStatement = null try { statement = conn.prepareStatement(sql) @@ -411,7 +469,9 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { statement.execute() } catch { case e: SQLException => - throw new KyuubiException(s"Error executing $sql:" + e.getMessage, e) + throw new KyuubiException( + s"Error executing sql: $sql, with params: ${params.mkString(", ")}. 
${e.getMessage}", + e) } finally { if (statement != null) { Utils.tryLogNonFatalError(statement.close()) @@ -423,7 +483,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { conn: Connection, sql: String, params: Any*)(f: ResultSet => T): T = { - debug(s"executing sql $sql with result set") + debug(s"executeQuery sql: $sql, with params: ${params.mkString(", ")}") var statement: PreparedStatement = null var resultSet: ResultSet = null try { @@ -433,7 +493,9 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { f(resultSet) } catch { case e: SQLException => - throw new KyuubiException(e.getMessage, e) + throw new KyuubiException( + s"Error executing sql: $sql, with params: ${params.mkString(", ")}. ${e.getMessage}", + e) } finally { if (resultSet != null) { Utils.tryLogNonFatalError(resultSet.close()) @@ -448,7 +510,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { conn: Connection, sql: String, params: Any*)(f: Int => T): T = { - debug(s"executing sql $sql with update count") + debug(s"executeUpdate sql: $sql, with params: ${params.mkString(", ")}") var statement: PreparedStatement = null try { statement = conn.prepareStatement(sql) @@ -456,7 +518,9 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { f(statement.executeUpdate()) } catch { case e: SQLException => - throw new KyuubiException(e.getMessage, e) + throw new KyuubiException( + s"Error executing sql: $sql, with params: ${params.mkString(", ")}. ${e.getMessage}", + e) } finally { if (statement != null) { Utils.tryLogNonFatalError(statement.close()) @@ -479,22 +543,6 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { } } - private def withConnection[T](autoCommit: Boolean = true)(f: Connection => T): T = { - var connection: Connection = null - try { - connection = hikariDataSource.getConnection - connection.setAutoCommit(autoCommit) - f(connection) - } catch { - case e: SQLException => - throw new KyuubiException(e.getMessage, e) - } finally { - if (connection != null) { - Utils.tryLogNonFatalError(connection.close()) - } - } - } - private def valueAsString(obj: Any): String = { mapper.writeValueAsString(obj) } @@ -519,7 +567,7 @@ class JDBCMetadataStore(conf: KyuubiConf) extends MetadataStore with Logging { object JDBCMetadataStore { private val SCHEMA_URL_PATTERN = """^metadata-store-schema-(\d+)\.(\d+)\.(\d+)\.(.*)\.sql$""".r private val METADATA_TABLE = "metadata" - private val METADATA_STATE_ONLY_COLUMNS = Seq( + private val METADATA_COLUMNS = Seq( "identifier", "session_type", "real_user", @@ -527,7 +575,11 @@ object JDBCMetadataStore { "ip_address", "kyuubi_instance", "state", + "resource", + "class_name", "request_name", + "request_conf", + "request_args", "create_time", "engine_type", "cluster_manager", @@ -538,10 +590,4 @@ object JDBCMetadataStore { "engine_error", "end_time", "peer_instance_closed").mkString(",") - private val METADATA_ALL_COLUMNS = Seq( - METADATA_STATE_ONLY_COLUMNS, - "resource", - "class_name", - "request_conf", - "request_args").mkString(",") } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreConf.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreConf.scala index de30b6e6689..dd5d741382f 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreConf.scala +++ 
b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreConf.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.server.metadata.jdbc -import java.util.{Locale, Properties} +import java.util.Properties import org.apache.kyuubi.config.{ConfigEntry, KyuubiConf, OptionalConfigEntry} import org.apache.kyuubi.config.KyuubiConf.buildConf @@ -37,7 +37,9 @@ object JDBCMetadataStoreConf { val METADATA_STORE_JDBC_DATABASE_TYPE: ConfigEntry[String] = buildConf("kyuubi.metadata.store.jdbc.database.type") .doc("The database type for server jdbc metadata store.
        "<ul>" +
-        " <li>DERBY: Apache Derby, JDBC driver `org.apache.derby.jdbc.AutoloadedDriver`.</li>" +
+        " <li>(Deprecated) DERBY: Apache Derby, JDBC driver " +
+        "`org.apache.derby.jdbc.AutoloadedDriver`.</li>" +
+        " <li>SQLITE: SQLite3, JDBC driver `org.sqlite.JDBC`.</li>" +
         " <li>MYSQL: MySQL, JDBC driver `com.mysql.jdbc.Driver`.</li>" +
         " <li>CUSTOM: User-defined database type, need to specify corresponding JDBC driver.</li>" +
    • " + " Note that: The JDBC datasource is powered by HiKariCP, for datasource properties," + @@ -46,8 +48,8 @@ object JDBCMetadataStoreConf { .version("1.6.0") .serverOnly .stringConf - .transform(_.toUpperCase(Locale.ROOT)) - .createWithDefault("DERBY") + .transformToUpperCase + .createWithDefault("SQLITE") val METADATA_STORE_JDBC_DATABASE_SCHEMA_INIT: ConfigEntry[Boolean] = buildConf("kyuubi.metadata.store.jdbc.database.schema.init") @@ -67,14 +69,14 @@ object JDBCMetadataStoreConf { val METADATA_STORE_JDBC_URL: ConfigEntry[String] = buildConf("kyuubi.metadata.store.jdbc.url") - .doc("The JDBC url for server JDBC metadata store. By default, it is a DERBY in-memory" + + .doc("The JDBC url for server JDBC metadata store. By default, it is a SQLite" + " database url, and the state information is not shared across kyuubi instances. To" + " enable high availability for multiple kyuubi instances," + " please specify a production JDBC url.") .version("1.6.0") .serverOnly .stringConf - .createWithDefault("jdbc:derby:memory:kyuubi_state_store_db;create=true") + .createWithDefault("jdbc:sqlite:kyuubi_state_store.db") val METADATA_STORE_JDBC_USER: ConfigEntry[String] = buildConf("kyuubi.metadata.store.jdbc.user") diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JdbcDatabaseDialect.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JdbcDatabaseDialect.scala index 837af77cf58..69bd36519e1 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JdbcDatabaseDialect.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/metadata/jdbc/JdbcDatabaseDialect.scala @@ -18,19 +18,20 @@ package org.apache.kyuubi.server.metadata.jdbc trait JdbcDatabaseDialect { - def addLimitAndOffsetToQuery(sql: String, limit: Int, offset: Int): String + def limitClause(limit: Int, offset: Int): String } class DerbyDatabaseDialect extends JdbcDatabaseDialect { - override def addLimitAndOffsetToQuery(sql: String, limit: Int, offset: Int): String = { - s"$sql OFFSET $offset ROWS FETCH NEXT $limit ROWS ONLY" + override def limitClause(limit: Int, offset: Int): String = { + s"OFFSET $offset ROWS FETCH NEXT $limit ROWS ONLY" } } class GenericDatabaseDialect extends JdbcDatabaseDialect { - override def addLimitAndOffsetToQuery(sql: String, limit: Int, offset: Int): String = { - s"$sql LIMIT $limit OFFSET $offset" + override def limitClause(limit: Int, offset: Int): String = { + s"LIMIT $limit OFFSET $offset" } } -class MysqlDatabaseDialect extends GenericDatabaseDialect {} +class SQLiteDatabaseDialect extends GenericDatabaseDialect {} +class MySQLDatabaseDialect extends GenericDatabaseDialect {} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLCommandHandler.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLCommandHandler.scala index 2f574d904dc..5f7a07f5875 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLCommandHandler.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLCommandHandler.scala @@ -196,11 +196,12 @@ class MySQLCommandHandler( .getOrElse(KyuubiSQLException(s"Error operator state ${opStatus.state}")) } val resultSetMetadata = be.getResultSetMetadata(opHandle) - val rowSet = be.fetchResults( + val fetchResultResp = be.fetchResults( opHandle, FetchOrientation.FETCH_NEXT, Int.MaxValue, fetchLog = false) + val rowSet = fetchResultResp.getResults MySQLQueryResult(resultSetMetadata.getSchema, rowSet) } catch { case rethrow: 
Exception => diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLDataPackets.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLDataPackets.scala index d7917b51f83..273af56b867 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLDataPackets.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLDataPackets.scala @@ -84,7 +84,7 @@ case class MySQLColumnDefinition41Packet( case class MySQLTextResultSetRowPacket( sequenceId: Int, - row: Seq[Any]) extends MySQLPacket with SupportsEncode { + row: Iterable[Any]) extends MySQLPacket with SupportsEncode { private def nullVal = 0xFB diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLQueryResult.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLQueryResult.scala index c398e31e9a8..59371b923e9 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLQueryResult.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/mysql/MySQLQueryResult.scala @@ -42,7 +42,7 @@ trait MySQLQueryResult { def toColDefinePackets: Seq[MySQLPacket] - def toRowPackets: Seq[MySQLPacket] + def toRowPackets: Iterable[MySQLPacket] def toPackets: Seq[MySQLPacket] = { val buf = Seq.newBuilder[MySQLPacket] @@ -77,7 +77,7 @@ class MySQLSimpleQueryResult( decimals = decimals) } - override def toRowPackets: Seq[MySQLPacket] = + override def toRowPackets: Iterable[MySQLPacket] = rows.zipWithIndex.map { case (row, i) => val sequenceId = colCount + 3 + i MySQLTextResultSetRowPacket(sequenceId = sequenceId, row = row) @@ -94,8 +94,9 @@ class MySQLThriftQueryResult( override def toColDefinePackets: Seq[MySQLPacket] = schema.getColumns.asScala .zipWithIndex.map { case (tCol, i) => tColDescToMySQL(tCol, 2 + i) } + .toSeq - override def toRowPackets: Seq[MySQLPacket] = rows.getRows.asScala + override def toRowPackets: Iterable[MySQLPacket] = rows.getRows.asScala .zipWithIndex.map { case (tRow, i) => tRowToMySQL(tRow, colCount + 3 + i) } private def tColDescToMySQL( diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/Query.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/Query.scala index 925875579d6..dc9de4ae2e0 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/Query.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/Query.scala @@ -30,12 +30,13 @@ import scala.collection.mutable import Slug.Context.{EXECUTING_QUERY, QUEUED_QUERY} import com.google.common.hash.Hashing import io.trino.client.QueryResults -import org.apache.hive.service.rpc.thrift.TProtocolVersion +import org.apache.hive.service.rpc.thrift.{TBoolValue, TColumnDesc, TColumnValue, TGetResultSetMetadataResp, TPrimitiveTypeEntry, TProtocolVersion, TRow, TRowSet, TTableSchema, TTypeDesc, TTypeEntry, TTypeId} -import org.apache.kyuubi.operation.{FetchOrientation, OperationHandle} +import org.apache.kyuubi.operation.{FetchOrientation, OperationHandle, OperationState, OperationStatus} import org.apache.kyuubi.operation.OperationState.{FINISHED, INITIALIZED, OperationState, PENDING} import org.apache.kyuubi.server.trino.api.Query.KYUUBI_SESSION_ID import org.apache.kyuubi.service.BackendService +import org.apache.kyuubi.service.TFrontendService.OK_STATUS import org.apache.kyuubi.session.SessionHandle case class Query( @@ -68,7 +69,7 @@ case class Query( queryId.operationHandle, defaultFetchOrientation, defaultMaxRows, - false) + false).getResults 
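For the Trino EXECUTE branch added further below, the captured positional parameters are substituted into the previously prepared SQL via kyuubi-jdbc's `Utils.updateSql`. The following is only a rough, hypothetical stand-in for that substitution (no quoting, escaping, or placeholder validation), included to show the shape of the transformation:

object ParamSubstitutionSketch {
  // Replace the i-th '?' placeholder with the i-th literal parameter, in order.
  // The real substitution in kyuubi-jdbc is more careful; this only illustrates
  // how EXECUTE parameters map back onto the prepared statement text.
  def substitute(preparedSql: String, parameters: Seq[String]): String = {
    val it = parameters.iterator
    val sb = new StringBuilder
    preparedSql.foreach { ch =>
      if (ch == '?' && it.hasNext) sb.append(it.next()) else sb.append(ch)
    }
    sb.toString
  }

  def main(args: Array[String]): Unit = {
    val prepared = "SELECT * FROM nation WHERE regionkey = ? AND name = ?"
    println(substitute(prepared, Seq("1", "'ALGERIA'")))
    // SELECT * FROM nation WHERE regionkey = 1 AND name = 'ALGERIA'
  }
}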
TrinoContext.createQueryResults( queryId.getQueryId, nextUri, @@ -85,6 +86,45 @@ case class Query( } } + def getPrepareQueryResults( + token: Long, + uriInfo: UriInfo, + maxWait: Long = 0): QueryResults = { + val status = OperationStatus(OperationState.FINISHED, 0, 0, 0, 0, false) + val nextUri = null + val queryHtmlUri = uriInfo.getRequestUriBuilder + .replacePath("ui/query.html").replaceQuery(queryId.getQueryId).build() + + val columns = new TGetResultSetMetadataResp() + columns.setStatus(OK_STATUS) + val tColumnDesc = new TColumnDesc() + tColumnDesc.setColumnName("result") + val desc = new TTypeDesc + desc.addToTypes(TTypeEntry.primitiveEntry(new TPrimitiveTypeEntry(TTypeId.BOOLEAN_TYPE))) + tColumnDesc.setTypeDesc(desc) + tColumnDesc.setPosition(0) + val schema = new TTableSchema() + schema.addToColumns(tColumnDesc) + columns.setSchema(schema) + + val rows = new java.util.ArrayList[TRow] + val trow = new TRow() + val value = new TBoolValue() + value.setValue(true) + trow.addToColVals(TColumnValue.boolVal(value)) + rows.add(trow) + val rowSet = new TRowSet(0, rows) + + TrinoContext.createQueryResults( + queryId.getQueryId, + nextUri, + queryHtmlUri, + status, + Option(columns), + Option(rowSet), + updateType = "PREPARE") + } + def getLastToken: Long = this.lastToken.get() def getSlug: Slug = this.slug @@ -152,6 +192,20 @@ object Query { Query(QueryId(operationHandle), updatedContext, backendService) } + def apply( + statementId: String, + statement: String, + context: TrinoContext, + backendService: BackendService): Query = { + val sessionHandle = getOrCreateSession(context, backendService) + val sessionWithId = + context.session + (KYUUBI_SESSION_ID -> sessionHandle.identifier.toString) + Query( + queryId = QueryId(new OperationHandle(UUID.randomUUID())), + context.copy(preparedStatement = Map(statementId -> statement), session = sessionWithId), + backendService) + } + def apply(id: String, context: TrinoContext, backendService: BackendService): Query = { Query(QueryId(id), context, backendService) } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala index 8c85f31d780..842f0ceec73 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/TrinoContext.scala @@ -187,14 +187,20 @@ object TrinoContext { queryHtmlUri: URI, queryStatus: OperationStatus, columns: Option[TGetResultSetMetadataResp] = None, - data: Option[TRowSet] = None): QueryResults = { + data: Option[TRowSet] = None, + updateType: String = null): QueryResults = { val columnList = columns match { case Some(value) => convertTColumn(value) case None => null } val rowList = data match { - case Some(value) => convertTRowSet(value) + case Some(value) => + Option(updateType) match { + case Some("PREPARE") => + ImmutableList.of(ImmutableList.of(true).asInstanceOf[util.List[Object]]) + case _ => convertTRowSet(value) + } case None => null } @@ -214,7 +220,7 @@ object TrinoContext { .setElapsedTimeMillis(0).setQueuedTimeMillis(0).build(), toQueryError(queryStatus), defaultWarning, - null, + updateType, 0L) } @@ -327,7 +333,7 @@ object TrinoContext { if (rowSet.getColumns == null) { return rowSet.getRows.asScala - .map(t => t.getColVals.asScala.map(v => v.getFieldValue.asInstanceOf[Object]).asJava) + .map(t => t.getColVals.asScala.map(v => v.getFieldValue).asJava) .asJava } diff --git 
a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/api.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/api.scala index 76f8a1ca3ae..50887dabdf4 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/api.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/api.scala @@ -25,6 +25,7 @@ import javax.ws.rs.ext.{ExceptionMapper, Provider} import org.eclipse.jetty.server.handler.ContextHandler +import org.apache.kyuubi.Logging import org.apache.kyuubi.server.KyuubiTrinoFrontendService private[api] trait ApiRequestContext { @@ -39,18 +40,19 @@ private[api] trait ApiRequestContext { } @Provider -class RestExceptionMapper extends ExceptionMapper[Exception] { +class RestExceptionMapper extends ExceptionMapper[Exception] with Logging { override def toResponse(exception: Exception): Response = { + warn("Error occurs on accessing Trino API.", exception) exception match { case e: WebApplicationException => Response.status(e.getResponse.getStatus) - .`type`(e.getResponse.getMediaType) - .entity(e.getMessage) + .`type`(MediaType.APPLICATION_JSON) + .entity(Map("message" -> e.getMessage)) .build() case e => Response.status(Response.Status.INTERNAL_SERVER_ERROR) .`type`(MediaType.APPLICATION_JSON) - .entity(e.getMessage) + .entity(Map("message" -> e.getMessage)) .build() } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala index e051dbb2376..c6b5550cc51 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/trino/api/v1/StatementResource.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.server.trino.api.v1 +import java.util +import java.util.UUID import javax.ws.rs._ import javax.ws.rs.core.{Context, HttpHeaders, MediaType, Response, UriInfo} import javax.ws.rs.core.MediaType.TEXT_PLAIN_TYPE @@ -32,16 +34,21 @@ import io.swagger.v3.oas.annotations.tags.Tag import io.trino.client.QueryResults import org.apache.kyuubi.Logging +import org.apache.kyuubi.jdbc.hive.Utils +import org.apache.kyuubi.operation.OperationHandle import org.apache.kyuubi.server.trino.api.{ApiRequestContext, KyuubiTrinoOperationTranslator, Query, QueryId, Slug, TrinoContext} import org.apache.kyuubi.server.trino.api.Slug.Context.{EXECUTING_QUERY, QUEUED_QUERY} import org.apache.kyuubi.server.trino.api.v1.dto.Ok import org.apache.kyuubi.service.BackendService +import org.apache.kyuubi.sql.parser.trino.KyuubiTrinoFeParser +import org.apache.kyuubi.sql.plan.trino.{Deallocate, ExecuteForPreparing, Prepare} @Tag(name = "Statement") @Produces(Array(MediaType.APPLICATION_JSON)) private[v1] class StatementResource extends ApiRequestContext with Logging { lazy val translator = new KyuubiTrinoOperationTranslator(fe.be) + lazy val parser = new KyuubiTrinoFeParser() @ApiResponse( responseCode = "200", @@ -73,9 +80,39 @@ private[v1] class StatementResource extends ApiRequestContext with Logging { val trinoContext = TrinoContext(headers, remoteAddr) try { - val query = Query(statement, trinoContext, translator, fe.be) - val qr = query.getQueryResults(query.getLastToken, uriInfo) - TrinoContext.buildTrinoResponse(qr, query.context) + parser.parsePlan(statement) match { + case Prepare(statementId, _) => + val query = Query( + statementId, + statement.split(s"$statementId FROM")(1), + trinoContext, + fe.be) + val qr = 
query.getPrepareQueryResults(query.getLastToken, uriInfo) + TrinoContext.buildTrinoResponse(qr, query.context) + case ExecuteForPreparing(statementId, parameters) => + val parametersMap = new util.HashMap[Integer, String]() + for (i <- parameters.indices) { + parametersMap.put(i + 1, parameters(i)) + } + trinoContext.preparedStatement.get(statementId).map { originSql => + val realSql = Utils.updateSql(originSql, parametersMap) + val query = Query(realSql, trinoContext, translator, fe.be) + val qr = query.getQueryResults(query.getLastToken, uriInfo) + TrinoContext.buildTrinoResponse(qr, query.context) + }.get + case Deallocate(statementId) => + info(s"DEALLOCATE PREPARE ${statementId}") + val query = Query( + QueryId(new OperationHandle(UUID.randomUUID())), + trinoContext, + fe.be) + val qr = query.getPrepareQueryResults(query.getLastToken, uriInfo) + TrinoContext.buildTrinoResponse(qr, query.context) + case _ => + val query = Query(statement, trinoContext, translator, fe.be) + val qr = query.getQueryResults(query.getLastToken, uriInfo) + TrinoContext.buildTrinoResponse(qr, query.context) + } } catch { case e: Exception => val errorMsg = diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/ui/JettyServer.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/ui/JettyServer.scala index 3ee6f0913a9..00b172f2c94 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/ui/JettyServer.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/ui/JettyServer.scala @@ -67,6 +67,8 @@ private[kyuubi] case class JettyServer( dest: String): Unit = { addHandler(JettyUtils.createRedirectHandler(src, dest)) } + + def getState: String = server.getState } object JettyServer { diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSession.scala similarity index 53% rename from kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala rename to kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSession.scala index 228890a1e4e..e10230ebfa0 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSession.scala @@ -21,7 +21,6 @@ import scala.collection.JavaConverters._ import org.apache.hive.service.rpc.thrift.TProtocolVersion -import org.apache.kyuubi.client.api.v1.dto.BatchRequest import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys} import org.apache.kyuubi.engine.KyuubiApplicationManager @@ -31,15 +30,20 @@ import org.apache.kyuubi.operation.OperationState import org.apache.kyuubi.server.metadata.api.Metadata import org.apache.kyuubi.session.SessionType.SessionType -class KyuubiBatchSessionImpl( +class KyuubiBatchSession( user: String, password: String, ipAddress: String, conf: Map[String, String], override val sessionManager: KyuubiSessionManager, val sessionConf: KyuubiConf, - batchRequest: BatchRequest, - recoveryMetadata: Option[Metadata] = None) + batchType: String, + batchName: Option[String], + resource: String, + className: String, + batchArgs: Seq[String], + metadata: Option[Metadata] = None, + fromRecovery: Boolean) extends KyuubiSession( TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1, user, @@ -50,11 +54,11 @@ class KyuubiBatchSessionImpl( override val sessionType: SessionType = SessionType.BATCH override val handle: SessionHandle = { - val 
batchId = recoveryMetadata.map(_.identifier).getOrElse(conf(KYUUBI_BATCH_ID_KEY)) + val batchId = metadata.map(_.identifier).getOrElse(conf(KYUUBI_BATCH_ID_KEY)) SessionHandle.fromUUID(batchId) } - override def createTime: Long = recoveryMetadata.map(_.createTime).getOrElse(super.createTime) + override def createTime: Long = metadata.map(_.createTime).getOrElse(super.createTime) override def getNoOperationTime: Long = { if (batchJobSubmissionOp != null && !OperationState.isTerminal( @@ -68,29 +72,40 @@ class KyuubiBatchSessionImpl( override val sessionIdleTimeoutThreshold: Long = sessionManager.getConf.get(KyuubiConf.BATCH_SESSION_IDLE_TIMEOUT) - // TODO: Support batch conf advisor - override val normalizedConf: Map[String, String] = { - sessionConf.getBatchConf(batchRequest.getBatchType) ++ - sessionManager.validateBatchConf(batchRequest.getConf.asScala.toMap) + override val normalizedConf: Map[String, String] = + sessionConf.getBatchConf(batchType) ++ sessionManager.validateBatchConf(conf) + + val optimizedConf: Map[String, String] = { + val confOverlay = sessionManager.sessionConfAdvisor.getConfOverlay( + user, + normalizedConf.asJava) + if (confOverlay != null) { + val overlayConf = new KyuubiConf(false) + confOverlay.asScala.foreach { case (k, v) => overlayConf.set(k, v) } + normalizedConf ++ overlayConf.getBatchConf(batchType) + } else { + warn(s"the server plugin return null value for user: $user, ignore it") + normalizedConf + } } - override lazy val name: Option[String] = Option(batchRequest.getName).orElse( - normalizedConf.get(KyuubiConf.SESSION_NAME.key)) + override lazy val name: Option[String] = + batchName.filterNot(_.trim.isEmpty).orElse(optimizedConf.get(KyuubiConf.SESSION_NAME.key)) // whether the resource file is from uploading - private[kyuubi] val isResourceUploaded: Boolean = batchRequest.getConf - .getOrDefault(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean + private[kyuubi] val isResourceUploaded: Boolean = + conf.getOrElse(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean private[kyuubi] lazy val batchJobSubmissionOp = sessionManager.operationManager .newBatchJobSubmissionOperation( this, - batchRequest.getBatchType, + batchType, name.orNull, - batchRequest.getResource, - batchRequest.getClassName, - normalizedConf, - batchRequest.getArgs.asScala, - recoveryMetadata) + resource, + className, + optimizedConf, + batchArgs, + metadata) private def waitMetadataRequestsRetryCompletion(): Unit = { val batchId = batchJobSubmissionOp.batchId @@ -105,7 +120,9 @@ class KyuubiBatchSessionImpl( } private val sessionEvent = KyuubiSessionEvent(this) - recoveryMetadata.foreach(metadata => sessionEvent.engineId = metadata.engineId) + if (fromRecovery) { + metadata.foreach { m => sessionEvent.engineId = m.engineId } + } EventBus.post(sessionEvent) override def getSessionEvent: Option[KyuubiSessionEvent] = { @@ -114,40 +131,57 @@ class KyuubiBatchSessionImpl( override def checkSessionAccessPathURIs(): Unit = { KyuubiApplicationManager.checkApplicationAccessPaths( - batchRequest.getBatchType, - normalizedConf, + batchType, + optimizedConf, sessionManager.getConf) - if (batchRequest.getResource != SparkProcessBuilder.INTERNAL_RESOURCE - && !isResourceUploaded) { - KyuubiApplicationManager.checkApplicationAccessPath( - batchRequest.getResource, - sessionManager.getConf) + if (resource != SparkProcessBuilder.INTERNAL_RESOURCE && !isResourceUploaded) { + KyuubiApplicationManager.checkApplicationAccessPath(resource, 
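// [Editor's aside, not part of the patch] With optimizedConf above, batch sessions now
// consult the SessionConfAdvisor plugin (the old "TODO: Support batch conf advisor" is
// gone). A minimal sketch of such an advisor; the class name, user, and queue value are
// illustrative, but only keys under the "kyuubi.batchConf.<batchType>." prefix survive
// the overlayConf.getBatchConf(batchType) filtering shown in the diff:
import java.util.{Collections, Map => JMap}
import org.apache.kyuubi.plugin.SessionConfAdvisor

class BatchQueueAdvisor extends SessionConfAdvisor {
  override def getConfOverlay(
      user: String,
      sessionConf: JMap[String, String]): JMap[String, String] =
    if (user == "etl") {
      Collections.singletonMap("kyuubi.batchConf.spark.spark.yarn.queue", "root.etl")
    } else {
      Collections.emptyMap()
    }
}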
sessionManager.getConf) } } override def open(): Unit = handleSessionException { traceMetricsOnOpen() - if (recoveryMetadata.isEmpty) { - val metaData = Metadata( - identifier = handle.identifier.toString, - sessionType = sessionType, - realUser = realUser, - username = user, - ipAddress = ipAddress, - kyuubiInstance = connectionUrl, - state = OperationState.PENDING.toString, - resource = batchRequest.getResource, - className = batchRequest.getClassName, - requestName = name.orNull, - requestConf = normalizedConf, - requestArgs = batchRequest.getArgs.asScala, - createTime = createTime, - engineType = batchRequest.getBatchType, - clusterManager = batchJobSubmissionOp.builder.clusterManager()) - - // there is a chance that operation failed w/ duplicated key error - sessionManager.insertMetadata(metaData) + lazy val kubernetesInfo: Map[String, String] = { + val appMgrInfo = batchJobSubmissionOp.builder.appMgrInfo() + appMgrInfo.kubernetesInfo.context.map { context => + Map(KyuubiConf.KUBERNETES_CONTEXT.key -> context) + }.getOrElse(Map.empty) ++ appMgrInfo.kubernetesInfo.namespace.map { namespace => + Map(KyuubiConf.KUBERNETES_NAMESPACE.key -> namespace) + }.getOrElse(Map.empty) + } + + (metadata, fromRecovery) match { + case (Some(initialMetadata), false) => + // new batch job created using batch impl v2 + val metadataToUpdate = Metadata( + identifier = initialMetadata.identifier, + requestName = name.orNull, + requestConf = optimizedConf ++ kubernetesInfo, // save the kubernetes info + clusterManager = batchJobSubmissionOp.builder.clusterManager()) + sessionManager.updateMetadata(metadataToUpdate) + case (None, _) => + // new batch job created using batch impl v1 + val newMetadata = Metadata( + identifier = handle.identifier.toString, + sessionType = sessionType, + realUser = realUser, + username = user, + ipAddress = ipAddress, + kyuubiInstance = connectionUrl, + state = OperationState.PENDING.toString, + resource = resource, + className = className, + requestName = name.orNull, + requestConf = optimizedConf ++ kubernetesInfo, // save the kubernetes info + requestArgs = batchArgs, + createTime = createTime, + engineType = batchType, + clusterManager = batchJobSubmissionOp.builder.clusterManager()) + + // there is a chance that operation failed w/ duplicated key error + sessionManager.insertMetadata(newMetadata) + case _ => } checkSessionAccessPathURIs() diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSession.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSession.scala index 18597dac91c..a4c345af39c 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSession.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSession.scala @@ -20,7 +20,7 @@ import com.codahale.metrics.MetricRegistry import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.kyuubi.config.KyuubiReservedKeys.{KYUUBI_SESSION_CONNECTION_URL_KEY, KYUUBI_SESSION_REAL_USER_KEY} -import org.apache.kyuubi.events.KyuubiSessionEvent +import org.apache.kyuubi.events.{EventBus, KyuubiSessionEvent} import org.apache.kyuubi.metrics.MetricsConstants.{CONN_OPEN, CONN_TOTAL} import org.apache.kyuubi.metrics.MetricsSystem import org.apache.kyuubi.session.SessionType.SessionType @@ -36,9 +36,9 @@ abstract class KyuubiSession( val sessionType: SessionType - val connectionUrl = conf.get(KYUUBI_SESSION_CONNECTION_URL_KEY).getOrElse("") + val connectionUrl = conf.getOrElse(KYUUBI_SESSION_CONNECTION_URL_KEY, "") - val realUser = 
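// [Editor's aside, not part of the patch] A self-contained restatement of the three
// (metadata, fromRecovery) branches that open() above distinguishes; the helper name is
// illustrative only:
import org.apache.kyuubi.server.metadata.api.Metadata

object BatchOpenPathSketch {
  def describe(metadata: Option[Metadata], fromRecovery: Boolean): String =
    (metadata, fromRecovery) match {
      case (Some(_), false) =>
        "batch v2 REST submission: a row already exists (see initializeBatchState), so open() only updates it"
      case (None, _) =>
        "batch v1 submission: open() inserts the full Metadata row"
      case _ =>
        "recovery: the existing row is reused as-is"
    }
}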
conf.get(KYUUBI_SESSION_REAL_USER_KEY).getOrElse(user) + val realUser = conf.getOrElse(KYUUBI_SESSION_REAL_USER_KEY, user) def getSessionEvent: Option[KyuubiSessionEvent] @@ -49,7 +49,10 @@ abstract class KyuubiSession( f } catch { case t: Throwable => - getSessionEvent.foreach(_.exception = Some(t)) + getSessionEvent.foreach { sessionEvent => + sessionEvent.exception = Some(t) + EventBus.post(sessionEvent) + } throw t } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala index e4203b301ab..6dd1810a8de 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala @@ -64,11 +64,9 @@ class KyuubiSessionImpl( } } - // TODO: needs improve the hardcode optimizedConf.foreach { - case ("use:catalog", _) => - case ("use:database", _) => - case ("kyuubi.engine.pool.size.threshold", _) => + case (USE_CATALOG, _) => + case (USE_DATABASE, _) => case (key, value) => sessionConf.set(key, value) } @@ -77,9 +75,10 @@ class KyuubiSessionImpl( lazy val engine: EngineRef = new EngineRef( sessionConf, user, - sessionManager.groupProvider.primaryGroup(user, optimizedConf.asJava), + sessionManager.groupProvider, handle.identifier.toString, - sessionManager.applicationManager) + sessionManager.applicationManager, + sessionManager.engineStartupProcessSemaphore) private[kyuubi] val launchEngineOp = sessionManager.operationManager .newLaunchEngineOperation(this, sessionConf.get(SESSION_ENGINE_LAUNCH_ASYNC)) @@ -116,6 +115,7 @@ class KyuubiSessionImpl( super.open() runOperation(launchEngineOp) + engineLastAlive = System.currentTimeMillis() } private[kyuubi] def openEngineSession(extraEngineLog: Option[OperationLog] = None): Unit = @@ -161,7 +161,7 @@ class KyuubiSessionImpl( } catch { case e: org.apache.thrift.transport.TTransportException if attempt < maxAttempts && e.getCause.isInstanceOf[java.net.ConnectException] && - e.getCause.getMessage.contains("Connection refused (Connection refused)") => + e.getCause.getMessage.contains("Connection refused") => warn( s"Failed to open [${engine.defaultEngineName} $host:$port] after" + s" $attempt/$maxAttempts times, retrying", @@ -285,4 +285,42 @@ class KyuubiSessionImpl( case _ => super.executeStatement(statement, confOverlay, runAsync, queryTimeout) } } + + @volatile private var engineLastAlive: Long = _ + private val engineAliveTimeout = sessionConf.get(KyuubiConf.ENGINE_ALIVE_TIMEOUT) + private val aliveProbeEnabled = sessionConf.get(KyuubiConf.ENGINE_ALIVE_PROBE_ENABLED) + private val engineAliveMaxFailCount = sessionConf.get(KyuubiConf.ENGINE_ALIVE_MAX_FAILURES) + private var engineAliveFailCount = 0 + + def checkEngineConnectionAlive(): Boolean = { + try { + if (Option(client).exists(_.engineConnectionClosed)) return false + if (!aliveProbeEnabled) return true + getInfo(TGetInfoType.CLI_DBMS_VER) + engineLastAlive = System.currentTimeMillis() + engineAliveFailCount = 0 + true + } catch { + case e: Throwable => + val now = System.currentTimeMillis() + engineAliveFailCount = engineAliveFailCount + 1 + if (now - engineLastAlive > engineAliveTimeout && + engineAliveFailCount >= engineAliveMaxFailCount) { + error(s"The engineRef[${engine.getEngineRefId()}] is marked as not alive " + + s"due to a lack of recent successful alive probes. 
" + + s"The time since last successful probe: " + + s"${now - engineLastAlive} ms exceeds the timeout of $engineAliveTimeout ms. " + + s"The engine has failed $engineAliveFailCount times, " + + s"surpassing the maximum failure count of $engineAliveMaxFailCount.") + false + } else { + warn( + s"The engineRef[${engine.getEngineRefId()}] alive probe fails, " + + s"${now - engineLastAlive} ms exceeds timeout $engineAliveTimeout ms, " + + s"and has failed $engineAliveFailCount times.", + e) + true + } + } + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala index 73248cd5632..8d323469959 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala @@ -17,6 +17,8 @@ package org.apache.kyuubi.session +import java.util.concurrent.{Semaphore, TimeUnit} + import scala.collection.JavaConverters._ import com.codahale.metrics.MetricRegistry @@ -25,8 +27,10 @@ import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.kyuubi.KyuubiSQLException import org.apache.kyuubi.client.api.v1.dto.{Batch, BatchRequest} +import org.apache.kyuubi.client.util.BatchUtils.KYUUBI_BATCH_ID_KEY import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_REAL_USER_KEY import org.apache.kyuubi.credentials.HadoopCredentialsManager import org.apache.kyuubi.engine.KyuubiApplicationManager import org.apache.kyuubi.metrics.MetricsConstants._ @@ -34,9 +38,9 @@ import org.apache.kyuubi.metrics.MetricsSystem import org.apache.kyuubi.operation.{KyuubiOperationManager, OperationState} import org.apache.kyuubi.plugin.{GroupProvider, PluginLoader, SessionConfAdvisor} import org.apache.kyuubi.server.metadata.{MetadataManager, MetadataRequestsRetryRef} -import org.apache.kyuubi.server.metadata.api.Metadata +import org.apache.kyuubi.server.metadata.api.{Metadata, MetadataFilter} import org.apache.kyuubi.sql.parser.server.KyuubiParser -import org.apache.kyuubi.util.SignUtils +import org.apache.kyuubi.util.{SignUtils, ThreadUtils} class KyuubiSessionManager private (name: String) extends SessionManager(name) { @@ -47,16 +51,11 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { val operationManager = new KyuubiOperationManager() val credentialsManager = new HadoopCredentialsManager() val applicationManager = new KyuubiApplicationManager() - private lazy val metadataManager: Option[MetadataManager] = { - // Currently, the metadata manager is used by the REST frontend which provides batch job APIs, - // so we initialize it only when Kyuubi starts with the REST frontend. - if (conf.get(FRONTEND_PROTOCOLS).map(FrontendProtocols.withName) - .contains(FrontendProtocols.REST)) { - Option(new MetadataManager()) - } else { - None - } - } + + // Currently, the metadata manager is used by the REST frontend which provides batch job APIs, + // so we initialize it only when Kyuubi starts with the REST frontend. 
+ lazy val metadataManager: Option[MetadataManager] = + if (conf.isRESTEnabled) Some(new MetadataManager()) else None // lazy is required for plugins since the conf is null when this class initialization lazy val sessionConfAdvisor: SessionConfAdvisor = PluginLoader.loadSessionConfAdvisor(conf) @@ -66,12 +65,18 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { private var batchLimiter: Option[SessionLimiter] = None lazy val (signingPrivateKey, signingPublicKey) = SignUtils.generateKeyPair() + var engineStartupProcessSemaphore: Option[Semaphore] = None + + private val engineConnectionAliveChecker = + ThreadUtils.newDaemonSingleThreadScheduledExecutor(s"$name-engine-alive-checker") + override def initialize(conf: KyuubiConf): Unit = { this.conf = conf addService(applicationManager) addService(credentialsManager) metadataManager.foreach(addService) initSessionLimiter(conf) + initEngineStartupProcessSemaphore(conf) super.initialize(conf) } @@ -121,7 +126,7 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { super.closeSession(sessionHandle) } finally { session match { - case _: KyuubiBatchSessionImpl => + case _: KyuubiBatchSession => batchLimiter.foreach(_.decrement(UserIpAddress(session.user, session.ipAddress))) case _ => limiter.foreach(_.decrement(UserIpAddress(session.user, session.ipAddress))) @@ -129,26 +134,39 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { } } - private def createBatchSession( + // scalastyle:off + def createBatchSession( user: String, password: String, ipAddress: String, conf: Map[String, String], - batchRequest: BatchRequest, - recoveryMetadata: Option[Metadata] = None): KyuubiBatchSessionImpl = { + batchType: String, + batchName: Option[String], + resource: String, + className: String, + batchArgs: Seq[String], + metadata: Option[Metadata] = None, + fromRecovery: Boolean): KyuubiBatchSession = { + // scalastyle:on val username = Option(user).filter(_.nonEmpty).getOrElse("anonymous") - new KyuubiBatchSessionImpl( + val sessionConf = this.getConf.getUserDefaults(user) + new KyuubiBatchSession( username, password, ipAddress, conf, this, - this.getConf.getUserDefaults(user), - batchRequest, - recoveryMetadata) + sessionConf, + batchType, + batchName, + resource, + className, + batchArgs, + metadata, + fromRecovery) } - private[kyuubi] def openBatchSession(batchSession: KyuubiBatchSessionImpl): SessionHandle = { + private[kyuubi] def openBatchSession(batchSession: KyuubiBatchSession): SessionHandle = { val user = batchSession.user val ipAddress = batchSession.ipAddress batchLimiter.foreach(_.increment(UserIpAddress(user, ipAddress))) @@ -182,14 +200,52 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { user: String, password: String, ipAddress: String, - conf: Map[String, String], batchRequest: BatchRequest): SessionHandle = { - val batchSession = createBatchSession(user, password, ipAddress, conf, batchRequest) + val batchSession = createBatchSession( + user, + password, + ipAddress, + batchRequest.getConf.asScala.toMap, + batchRequest.getBatchType, + Option(batchRequest.getName), + batchRequest.getResource, + batchRequest.getClassName, + batchRequest.getArgs.asScala.toSeq, + None, + fromRecovery = false) openBatchSession(batchSession) } - def getBatchSessionImpl(sessionHandle: SessionHandle): KyuubiBatchSessionImpl = { - getSessionOption(sessionHandle).map(_.asInstanceOf[KyuubiBatchSessionImpl]).orNull + def initializeBatchState( + 
user: String, + ipAddress: String, + conf: Map[String, String], + batchRequest: BatchRequest): String = { + val realUser = conf.getOrElse(KYUUBI_SESSION_REAL_USER_KEY, user) + val username = Option(user).filter(_.nonEmpty).getOrElse("anonymous") + val batchId = conf(KYUUBI_BATCH_ID_KEY) + val metadata = Metadata( + identifier = batchId, + sessionType = SessionType.BATCH, + realUser = realUser, + username = username, + ipAddress = ipAddress, + state = OperationState.INITIALIZED.toString, + resource = batchRequest.getResource, + className = batchRequest.getClassName, + requestName = batchRequest.getName, + requestConf = conf, + requestArgs = batchRequest.getArgs.asScala.toSeq, + createTime = System.currentTimeMillis(), + engineType = batchRequest.getBatchType) + + // there is a chance that operation failed w/ duplicated key error + metadataManager.foreach(_.insertMetadata(metadata, asyncRetryOnError = false)) + batchId + } + + def getBatchSession(sessionHandle: SessionHandle): Option[KyuubiBatchSession] = { + getSessionOption(sessionHandle).map(_.asInstanceOf[KyuubiBatchSession]) } def insertMetadata(metadata: Metadata): Unit = { @@ -201,32 +257,23 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { } def getMetadataRequestsRetryRef(identifier: String): Option[MetadataRequestsRetryRef] = { - Option(metadataManager.map(_.getMetadataRequestsRetryRef(identifier)).orNull) + metadataManager.flatMap(mm => Option(mm.getMetadataRequestsRetryRef(identifier))) } def deRegisterMetadataRequestsRetryRef(identifier: String): Unit = { metadataManager.foreach(_.deRegisterRequestsRetryRef(identifier)) } - def getBatchFromMetadataStore(batchId: String): Batch = { - metadataManager.map(_.getBatch(batchId)).orNull + def getBatchFromMetadataStore(batchId: String): Option[Batch] = { + metadataManager.flatMap(mm => mm.getBatch(batchId)) } - def getBatchesFromMetadataStore( - batchType: String, - batchUser: String, - batchState: String, - createTime: Long, - endTime: Long, - from: Int, - size: Int): Seq[Batch] = { - metadataManager.map( - _.getBatches(batchType, batchUser, batchState, createTime, endTime, from, size)) - .getOrElse(Seq.empty) + def getBatchesFromMetadataStore(filter: MetadataFilter, from: Int, size: Int): Seq[Batch] = { + metadataManager.map(_.getBatches(filter, from, size)).getOrElse(Seq.empty) } - def getBatchMetadata(batchId: String): Metadata = { - metadataManager.map(_.getBatchSessionMetadata(batchId)).orNull + def getBatchMetadata(batchId: String): Option[Metadata] = { + metadataManager.flatMap(_.getBatchSessionMetadata(batchId)) } @VisibleForTesting @@ -242,30 +289,28 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { ms.registerGauge(EXEC_POOL_WORK_QUEUE_SIZE, getWorkQueueSize, 0) } super.start() + startEngineAliveChecker() } - def getBatchSessionsToRecover(kyuubiInstance: String): Seq[KyuubiBatchSessionImpl] = { + def getBatchSessionsToRecover(kyuubiInstance: String): Seq[KyuubiBatchSession] = { Seq(OperationState.PENDING, OperationState.RUNNING).flatMap { stateToRecover => metadataManager.map(_.getBatchesRecoveryMetadata( stateToRecover.toString, kyuubiInstance, 0, Int.MaxValue).map { metadata => - val batchRequest = new BatchRequest( - metadata.engineType, - metadata.resource, - metadata.className, - metadata.requestName, - metadata.requestConf.asJava, - metadata.requestArgs.asJava) - createBatchSession( metadata.username, "anonymous", metadata.ipAddress, metadata.requestConf, - batchRequest, - Some(metadata)) + 
metadata.engineType, + Option(metadata.requestName), + metadata.resource, + metadata.className, + metadata.requestArgs, + Some(metadata), + fromRecovery = true) }).getOrElse(Seq.empty) } } @@ -286,8 +331,15 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { val userLimit = conf.get(SERVER_LIMIT_CONNECTIONS_PER_USER).getOrElse(0) val ipAddressLimit = conf.get(SERVER_LIMIT_CONNECTIONS_PER_IPADDRESS).getOrElse(0) val userIpAddressLimit = conf.get(SERVER_LIMIT_CONNECTIONS_PER_USER_IPADDRESS).getOrElse(0) - val userUnlimitedList = conf.get(SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST) - limiter = applySessionLimiter(userLimit, ipAddressLimit, userIpAddressLimit, userUnlimitedList) + val userUnlimitedList = + conf.get(SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST).filter(_.nonEmpty) + val userDenyList = conf.get(SERVER_LIMIT_CONNECTIONS_USER_DENY_LIST).filter(_.nonEmpty) + limiter = applySessionLimiter( + userLimit, + ipAddressLimit, + userIpAddressLimit, + userUnlimitedList, + userDenyList) val userBatchLimit = conf.get(SERVER_LIMIT_BATCH_CONNECTIONS_PER_USER).getOrElse(0) val ipAddressBatchLimit = conf.get(SERVER_LIMIT_BATCH_CONNECTIONS_PER_IPADDRESS).getOrElse(0) @@ -297,25 +349,76 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) { userBatchLimit, ipAddressBatchLimit, userIpAddressBatchLimit, - userUnlimitedList) + userUnlimitedList, + userDenyList) } - private[kyuubi] def getUnlimitedUsers(): Set[String] = { + private[kyuubi] def getUnlimitedUsers: Set[String] = { limiter.orElse(batchLimiter).map(SessionLimiter.getUnlimitedUsers).getOrElse(Set.empty) } private[kyuubi] def refreshUnlimitedUsers(conf: KyuubiConf): Unit = { - val unlimitedUsers = conf.get(SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST).toSet + val unlimitedUsers = + conf.get(SERVER_LIMIT_CONNECTIONS_USER_UNLIMITED_LIST).filter(_.nonEmpty) limiter.foreach(SessionLimiter.resetUnlimitedUsers(_, unlimitedUsers)) batchLimiter.foreach(SessionLimiter.resetUnlimitedUsers(_, unlimitedUsers)) } + private[kyuubi] def getDenyUsers: Set[String] = { + limiter.orElse(batchLimiter).map(SessionLimiter.getDenyUsers).getOrElse(Set.empty) + } + + private[kyuubi] def refreshDenyUsers(conf: KyuubiConf): Unit = { + val denyUsers = conf.get(SERVER_LIMIT_CONNECTIONS_USER_DENY_LIST).filter(_.nonEmpty) + limiter.foreach(SessionLimiter.resetDenyUsers(_, denyUsers)) + batchLimiter.foreach(SessionLimiter.resetDenyUsers(_, denyUsers)) + } + private def applySessionLimiter( userLimit: Int, ipAddressLimit: Int, userIpAddressLimit: Int, - userUnlimitedList: Seq[String]): Option[SessionLimiter] = { - Seq(userLimit, ipAddressLimit, userIpAddressLimit).find(_ > 0).map(_ => - SessionLimiter(userLimit, ipAddressLimit, userIpAddressLimit, userUnlimitedList.toSet)) + userUnlimitedList: Set[String], + userDenyList: Set[String]): Option[SessionLimiter] = { + if (Seq(userLimit, ipAddressLimit, userIpAddressLimit).exists(_ > 0) || userDenyList.nonEmpty) { + Some(SessionLimiter( + userLimit, + ipAddressLimit, + userIpAddressLimit, + userUnlimitedList, + userDenyList)) + } else { + None + } + } + + private def startEngineAliveChecker(): Unit = { + val interval = conf.get(KyuubiConf.ENGINE_ALIVE_PROBE_INTERVAL) + val checkTask: Runnable = () => { + allSessions().foreach { session => + if (!session.asInstanceOf[KyuubiSessionImpl].checkEngineConnectionAlive()) { + try { + closeSession(session.handle) + logger.info(s"The session ${session.handle} has been closed " + + s"due to engine unresponsiveness (checked by the 
engine alive checker).") + } catch { + case e: KyuubiSQLException => + warn(s"Error closing session ${session.handle}", e) + } + } + } + } + engineConnectionAliveChecker.scheduleWithFixedDelay( + checkTask, + interval, + interval, + TimeUnit.MILLISECONDS) + } + + private def initEngineStartupProcessSemaphore(conf: KyuubiConf): Unit = { + val engineCreationLimit = conf.get(KyuubiConf.SERVER_LIMIT_ENGINE_CREATION) + engineCreationLimit.filter(_ > 0).foreach { limit => + engineStartupProcessSemaphore = Some(new Semaphore(limit)) + } } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/SessionLimiter.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/SessionLimiter.scala index 96ca36df176..8a1ebedf1a5 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/SessionLimiter.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/SessionLimiter.scala @@ -95,33 +95,40 @@ class SessionLimiterImpl(userLimit: Int, ipAddressLimit: Int, userIpAddressLimit private def decrLimitCount(key: String): Unit = { _counters.get(key) match { - case count: AtomicInteger => count.decrementAndGet() + case count: AtomicInteger => + count.accumulateAndGet(1, (l, r) => if (l > 0) l - r else l) case _ => } } } -class SessionLimiterWithUnlimitedUsersImpl( +class SessionLimiterWithAccessControlListImpl( userLimit: Int, ipAddressLimit: Int, userIpAddressLimit: Int, - var unlimitedUsers: Set[String]) + var unlimitedUsers: Set[String], + var denyUsers: Set[String]) extends SessionLimiterImpl(userLimit, ipAddressLimit, userIpAddressLimit) { override def increment(userIpAddress: UserIpAddress): Unit = { - if (!unlimitedUsers.contains(userIpAddress.user)) { - super.increment(userIpAddress) + val user = userIpAddress.user + if (StringUtils.isNotBlank(user) && denyUsers.contains(user)) { + val errorMsg = + s"Connection denied because the user is in the deny user list. 
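// [Editor's aside, not part of the patch] Why decrLimitCount switched from
// decrementAndGet to accumulateAndGet above: with the decrement() override removed just
// below in this hunk, a counter can now be decremented for a connection whose increment()
// was skipped (unlimited users), so it is clamped at zero instead of drifting negative.
// A minimal illustration of the clamp:
import java.util.concurrent.atomic.AtomicInteger

object ClampedDecrementSketch {
  private def decr(count: AtomicInteger): Int =
    count.accumulateAndGet(1, (l, r) => if (l > 0) l - r else l)

  def demo(): Unit = {
    val count = new AtomicInteger(0)
    decr(count)             // stays 0 rather than going to -1
    count.incrementAndGet() // 1
    decr(count)             // back to 0
  }
}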
(user: $user)" + throw KyuubiSQLException(errorMsg) } - } - override def decrement(userIpAddress: UserIpAddress): Unit = { - if (!unlimitedUsers.contains(userIpAddress.user)) { - super.decrement(userIpAddress) + if (!unlimitedUsers.contains(user)) { + super.increment(userIpAddress) } } private[kyuubi] def setUnlimitedUsers(unlimitedUsers: Set[String]): Unit = { this.unlimitedUsers = unlimitedUsers } + + private[kyuubi] def setDenyUsers(denyUsers: Set[String]): Unit = { + this.denyUsers = denyUsers + } } object SessionLimiter { @@ -130,22 +137,35 @@ object SessionLimiter { userLimit: Int, ipAddressLimit: Int, userIpAddressLimit: Int, - unlimitedUsers: Set[String] = Set.empty): SessionLimiter = { - new SessionLimiterWithUnlimitedUsersImpl( + unlimitedUsers: Set[String] = Set.empty, + denyUsers: Set[String] = Set.empty): SessionLimiter = { + new SessionLimiterWithAccessControlListImpl( userLimit, ipAddressLimit, userIpAddressLimit, - unlimitedUsers) + unlimitedUsers, + denyUsers) } def resetUnlimitedUsers(limiter: SessionLimiter, unlimitedUsers: Set[String]): Unit = limiter match { - case l: SessionLimiterWithUnlimitedUsersImpl => l.setUnlimitedUsers(unlimitedUsers) + case l: SessionLimiterWithAccessControlListImpl => l.setUnlimitedUsers(unlimitedUsers) case _ => } def getUnlimitedUsers(limiter: SessionLimiter): Set[String] = limiter match { - case l: SessionLimiterWithUnlimitedUsersImpl => l.unlimitedUsers + case l: SessionLimiterWithAccessControlListImpl => l.unlimitedUsers + case _ => Set.empty + } + + def resetDenyUsers(limiter: SessionLimiter, denyUsers: Set[String]): Unit = + limiter match { + case l: SessionLimiterWithAccessControlListImpl => l.setDenyUsers(denyUsers) + case _ => + } + + def getDenyUsers(limiter: SessionLimiter): Set[String] = limiter match { + case l: SessionLimiterWithAccessControlListImpl => l.denyUsers case _ => Set.empty } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeAstBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeAstBuilder.scala index 061985c1caf..8d1e38519d9 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeAstBuilder.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeAstBuilder.scala @@ -25,7 +25,7 @@ import org.apache.kyuubi.sql.KyuubiTrinoFeBaseParser._ import org.apache.kyuubi.sql.KyuubiTrinoFeBaseParserBaseVisitor import org.apache.kyuubi.sql.parser.KyuubiParser.unescapeSQLString import org.apache.kyuubi.sql.plan.{KyuubiTreeNode, PassThroughNode} -import org.apache.kyuubi.sql.plan.trino.{GetCatalogs, GetColumns, GetPrimaryKeys, GetSchemas, GetTables, GetTableTypes, GetTypeInfo} +import org.apache.kyuubi.sql.plan.trino.{Deallocate, ExecuteForPreparing, GetCatalogs, GetColumns, GetPrimaryKeys, GetSchemas, GetTables, GetTableTypes, GetTypeInfo, Prepare} class KyuubiTrinoFeAstBuilder extends KyuubiTrinoFeBaseParserBaseVisitor[AnyRef] { @@ -123,4 +123,21 @@ class KyuubiTrinoFeAstBuilder extends KyuubiTrinoFeBaseParserBaseVisitor[AnyRef] override def visitTypesFilter(ctx: TypesFilterContext): List[String] = { ctx.stringLit().asScala.map(v => unescapeSQLString(v.getText)).toList } + + override def visitExecute(ctx: ExecuteContext): KyuubiTreeNode = { + val parameters = Option(ctx.parameterList()) match { + case Some(para) => + para.anyStr().asScala.toList.map(p => p.getText.substring(1, p.getText.length - 1)) + case None => List[String]() + } + ExecuteForPreparing(ctx.IDENTIFIER().getText, 
parameters) + } + + override def visitPrepare(ctx: PrepareContext): KyuubiTreeNode = { + Prepare(ctx.IDENTIFIER().getText, ctx.statement().getText) + } + + override def visitDeallocate(ctx: DeallocateContext): KyuubiTreeNode = { + Deallocate(ctx.IDENTIFIER().getText) + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeParser.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeParser.scala index 987288b0f82..5dececf20f0 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeParser.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/parser/trino/KyuubiTrinoFeParser.scala @@ -56,4 +56,5 @@ class KyuubiTrinoFeParser extends KyuubiParserBase[KyuubiTrinoFeBaseParser] { } override def parseTree(parser: KyuubiTrinoFeBaseParser): ParseTree = parser.singleStatement() + } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/command/RunnableCommand.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/command/RunnableCommand.scala index 54ca9f6892d..deda7d0061f 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/command/RunnableCommand.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/command/RunnableCommand.scala @@ -46,7 +46,7 @@ trait RunnableCommand extends KyuubiTreeNode { } val taken = iter.take(rowSetSize) val resultRowSet = RowSetHelper.toTRowSet( - taken.toList.asInstanceOf[List[Row]], + taken.toList, resultSchema, protocolVersion) resultRowSet.setStartRowOffset(iter.getPosition) diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/trino/TrinoFeOperations.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/trino/TrinoFeOperations.scala index 6136995ab10..8d02a74c676 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/trino/TrinoFeOperations.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/sql/plan/trino/TrinoFeOperations.scala @@ -59,3 +59,18 @@ case class GetColumns( case class GetPrimaryKeys() extends KyuubiTreeNode { override def name(): String = "Get Primary Keys" } + +case class ExecuteForPreparing(statementId: String, parameters: List[String]) + extends KyuubiTreeNode { + override def name(): String = "Execute For Preparing" +} + +case class Prepare(statementId: String, sql: String) + extends KyuubiTreeNode { + override def name(): String = "Prepare Sql" +} + +case class Deallocate(statementId: String) + extends KyuubiTreeNode { + override def name(): String = "Deallocate Prepare" +} diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/util/KubernetesUtils.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/util/KubernetesUtils.scala index 921aa04ae3c..9da3408a336 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/util/KubernetesUtils.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/util/KubernetesUtils.scala @@ -18,11 +18,12 @@ package org.apache.kyuubi.util import java.io.File +import java.util.Locale import com.fasterxml.jackson.databind.ObjectMapper import com.google.common.base.Charsets import com.google.common.io.Files -import io.fabric8.kubernetes.client.{Config, ConfigBuilder, DefaultKubernetesClient, KubernetesClient} +import io.fabric8.kubernetes.client.{Config, ConfigBuilder, KubernetesClient, KubernetesClientBuilder} import io.fabric8.kubernetes.client.Config.autoConfigure import io.fabric8.kubernetes.client.okhttp.OkHttpClientFactory import okhttp3.{Dispatcher, OkHttpClient} @@ -32,6 +33,10 @@ import 
org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ object KubernetesUtils extends Logging { + // Kubernetes pod name max length - '-exec-' - Int.MAX_VALUE.length + // 253 - 10 - 6 + final val EXECUTOR_POD_NAME_PREFIX_MAX_LENGTH = 237 + final val DRIVER_POD_NAME_MAX_LENGTH = 253 def buildKubernetesClient(conf: KyuubiConf): Option[KubernetesClient] = { val master = conf.get(KUBERNETES_MASTER) @@ -93,13 +98,16 @@ object KubernetesUtils extends Logging { debug("Kubernetes client config: " + new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(config)) - Some(new DefaultKubernetesClient(factoryWithCustomDispatcher.createHttpClient(config), config)) + Some(new KubernetesClientBuilder() + .withHttpClientFactory(factoryWithCustomDispatcher) + .withConfig(config) + .build()) } implicit private class OptionConfigurableConfigBuilder(val configBuilder: ConfigBuilder) extends AnyVal { - def withOption[T](option: Option[T])(configurator: ((T, ConfigBuilder) => ConfigBuilder)) + def withOption[T](option: Option[T])(configurator: (T, ConfigBuilder) => ConfigBuilder) : ConfigBuilder = { option.map { opt => configurator(opt, configBuilder) @@ -111,4 +119,32 @@ object KubernetesUtils extends Logging { opt1.foreach { _ => require(opt2.isEmpty, errMessage) } opt2.foreach { _ => require(opt1.isEmpty, errMessage) } } + + private def getResourceNamePrefix(appName: String, engineRefId: String): String = { + s"$appName-$engineRefId" + .trim + .toLowerCase(Locale.ROOT) + .replaceAll("[^a-z0-9\\-]", "-") + .replaceAll("-+", "-") + .replaceAll("^-", "") + .replaceAll("^[0-9]", "x") + } + + def generateDriverPodName(appName: String, engineRefId: String): String = { + val resolvedResourceName = s"kyuubi-${getResourceNamePrefix(appName, engineRefId)}-driver" + if (resolvedResourceName.length <= DRIVER_POD_NAME_MAX_LENGTH) { + resolvedResourceName + } else { + s"kyuubi-$engineRefId-driver" + } + } + + def generateExecutorPodNamePrefix(appName: String, engineRefId: String): String = { + val resolvedResourceName = s"kyuubi-${getResourceNamePrefix(appName, engineRefId)}" + if (resolvedResourceName.length <= EXECUTOR_POD_NAME_PREFIX_MAX_LENGTH) { + resolvedResourceName + } else { + s"kyuubi-$engineRefId" + } + } } diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/util/Validator.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/util/Validator.scala index 00eca3604db..7bada5ebee6 100644 --- a/kyuubi-server/src/main/scala/org/apache/kyuubi/util/Validator.scala +++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/util/Validator.scala @@ -38,7 +38,7 @@ object Validator { private val dns1123LabelFmt = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" 
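// [Editor's aside, not part of the patch] A worked example of the new pod-name helpers
// above, tracing getResourceNamePrefix by hand (lowercase, replace characters outside
// [a-z0-9-] with '-', squeeze repeated '-', drop a leading '-', mask a leading digit):
import org.apache.kyuubi.util.KubernetesUtils

object PodNameSketch {
  // sanitizes to "kyuubi-user-spark-sql-abc123", so the full driver name stays well
  // under the 253-character limit:
  val driverPod: String =
    KubernetesUtils.generateDriverPodName("Kyuubi User Spark SQL", "abc123")
  // => "kyuubi-kyuubi-user-spark-sql-abc123-driver"

  // an overly long app name blows the 237-character executor prefix budget, so the
  // helper falls back to the engine ref id alone:
  val executorPrefix: String =
    KubernetesUtils.generateExecutorPodNamePrefix("a" * 300, "abc123")
  // => "kyuubi-abc123"
}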
- private val podConfValidator = (s"^$dns1123LabelFmt(\\.$dns1123LabelFmt)*$$").r.pattern + private val podConfValidator = s"^$dns1123LabelFmt(\\.$dns1123LabelFmt)*$$".r.pattern val KUBERNETES_DNS_SUBDOMAIN_NAME_MAX_LENGTH = 253 diff --git a/kyuubi-server/src/test/resources/log4j2-test.xml b/kyuubi-server/src/test/resources/log4j2-test.xml index 623dd71fd14..25e37e8594d 100644 --- a/kyuubi-server/src/test/resources/log4j2-test.xml +++ b/kyuubi-server/src/test/resources/log4j2-test.xml @@ -48,5 +48,9 @@ + + + + diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/RestClientTestHelper.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/RestClientTestHelper.scala index 8344cdef01d..1c78b9fa612 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/RestClientTestHelper.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/RestClientTestHelper.scala @@ -48,7 +48,7 @@ trait RestClientTestHelper extends RestFrontendTestHelper with KerberizedTestHel UserGroupInformation.setConfiguration(config) assert(UserGroupInformation.isSecurityEnabled) - val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("KERBEROS", "LDAP", "CUSTOM")) + val conf = KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("KERBEROS", "LDAP", "CUSTOM")) .set(KyuubiConf.SERVER_KEYTAB.key, testKeytab) .set(KyuubiConf.SERVER_PRINCIPAL, testPrincipal) .set(KyuubiConf.SERVER_SPNEGO_KEYTAB, testKeytab) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/WithKyuubiServerOnYarn.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/WithKyuubiServerOnYarn.scala index 3bc6bb1c578..e4382a859d2 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/WithKyuubiServerOnYarn.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/WithKyuubiServerOnYarn.scala @@ -26,12 +26,12 @@ import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.config.KyuubiConf.FrontendProtocols.FrontendProtocol -import org.apache.kyuubi.engine.{ApplicationState, YarnApplicationOperation} +import org.apache.kyuubi.engine.{ApplicationManagerInfo, ApplicationState, YarnApplicationOperation} import org.apache.kyuubi.engine.ApplicationState._ import org.apache.kyuubi.operation.{FetchOrientation, HiveJDBCTestHelper, OperationState} import org.apache.kyuubi.operation.OperationState.ERROR import org.apache.kyuubi.server.MiniYarnService -import org.apache.kyuubi.session.{KyuubiBatchSessionImpl, KyuubiSessionManager} +import org.apache.kyuubi.session.{KyuubiBatchSession, KyuubiSessionManager} /** * To developers: @@ -80,7 +80,7 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD override protected val conf: KyuubiConf = { new KyuubiConf() .set(s"$KYUUBI_BATCH_CONF_PREFIX.spark.spark.master", "yarn") - .set(BATCH_CONF_IGNORE_LIST, Seq("spark.master")) + .set(BATCH_CONF_IGNORE_LIST, Set("spark.master")) .set(BATCH_APPLICATION_CHECK_INTERVAL, 3000L) } @@ -116,29 +116,32 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD "kyuubi", "passwd", "localhost", - batchRequest.getConf.asScala.toMap, batchRequest) - val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSessionImpl] + val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession] val batchJobSubmissionOp = session.batchJobSubmissionOp eventually(timeout(3.minutes), interval(50.milliseconds)) { - val appInfo = batchJobSubmissionOp.getOrFetchCurrentApplicationInfo 
+ val appInfo = batchJobSubmissionOp.getApplicationInfo assert(appInfo.nonEmpty) assert(appInfo.exists(_.id.startsWith("application_"))) } eventually(timeout(10.seconds)) { val metadata = session.sessionManager.getBatchMetadata(session.handle.identifier.toString) - assert(metadata.state === "RUNNING") - assert(metadata.engineId.startsWith("application_")) + assert(metadata.map(_.state).contains("RUNNING")) + assert(metadata.map(_.engineId).get.startsWith("application_")) } - val killResponse = yarnOperation.killApplicationByTag(sessionHandle.identifier.toString) + val appMgrInfo = ApplicationManagerInfo(Some("yarn")) + + val killResponse = + yarnOperation.killApplicationByTag(appMgrInfo, sessionHandle.identifier.toString) assert(killResponse._1) assert(killResponse._2 startsWith "Succeeded to terminate:") - val appInfo = yarnOperation.getApplicationInfoByTag(sessionHandle.identifier.toString) + val appInfo = + yarnOperation.getApplicationInfoByTag(appMgrInfo, sessionHandle.identifier.toString) assert(appInfo.state === KILLED) @@ -147,7 +150,7 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD } val resultColumns = batchJobSubmissionOp.getNextRowSet(FetchOrientation.FETCH_NEXT, 10) - .getColumns.asScala + .getResults.getColumns.asScala val keys = resultColumns.head.getStringVal.getValues.asScala val values = resultColumns.apply(1).getStringVal.getValues.asScala @@ -158,7 +161,7 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD val appUrl = rows("url") val appError = rows("error") - val appInfo2 = batchJobSubmissionOp.getOrFetchCurrentApplicationInfo.get + val appInfo2 = batchJobSubmissionOp.getApplicationInfo.get assert(appId === appInfo2.id) assert(appName === appInfo2.name) assert(appState === appInfo2.state.toString) @@ -176,16 +179,15 @@ class KyuubiOperationYarnClusterSuite extends WithKyuubiServerOnYarn with HiveJD "kyuubi", "passwd", "localhost", - batchRequest.getConf.asScala.toMap, batchRequest) - val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSessionImpl] + val session = sessionManager.getSession(sessionHandle).asInstanceOf[KyuubiBatchSession] val batchJobSubmissionOp = session.batchJobSubmissionOp eventually(timeout(3.minutes), interval(50.milliseconds)) { - assert(batchJobSubmissionOp.getOrFetchCurrentApplicationInfo.exists(_.id == null)) - assert(batchJobSubmissionOp.getOrFetchCurrentApplicationInfo.exists( - _.state == ApplicationState.NOT_FOUND)) + assert(batchJobSubmissionOp.getApplicationInfo.exists(_.id == null)) + assert(batchJobSubmissionOp.getApplicationInfo.exists( + _.state == ApplicationState.UNKNOWN)) assert(batchJobSubmissionOp.getStatus.state === OperationState.ERROR) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/config/AllKyuubiConfiguration.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/config/AllKyuubiConfiguration.scala index bb183c00cc8..f53fb3a6128 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/config/AllKyuubiConfiguration.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/config/AllKyuubiConfiguration.scala @@ -21,32 +21,31 @@ import java.nio.file.Paths import scala.collection.JavaConverters._ -import org.apache.kyuubi.{KyuubiFunSuite, MarkdownBuilder, MarkdownUtils, Utils} +import org.apache.kyuubi.{KyuubiFunSuite, MarkdownBuilder, Utils} import org.apache.kyuubi.ctl.CtlConf import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.metrics.MetricsConf import 
org.apache.kyuubi.server.metadata.jdbc.JDBCMetadataStoreConf +import org.apache.kyuubi.util.GoldenFileUtils._ import org.apache.kyuubi.zookeeper.ZookeeperConf -// scalastyle:off line.size.limit /** * End-to-end test cases for configuration doc file - * The golden result file is "docs/deployment/settings.md". + * The golden result file is "docs/configuration/settings.md". * * To run the entire test suite: * {{{ - * build/mvn clean test -pl kyuubi-server -am -Pflink-provided,spark-provided,hive-provided -DwildcardSuites=org.apache.kyuubi.config.AllKyuubiConfiguration + * KYUUBI_UPDATE=0 dev/gen/gen_all_config_docs.sh * }}} * * To re-generate golden files for entire suite, run: * {{{ - * KYUUBI_UPDATE=1 build/mvn clean test -pl kyuubi-server -am -Pflink-provided,spark-provided,hive-provided -DwildcardSuites=org.apache.kyuubi.config.AllKyuubiConfiguration + * dev/gen/gen_all_config_docs.sh * }}} */ -// scalastyle:on line.size.limit class AllKyuubiConfiguration extends KyuubiFunSuite { private val kyuubiHome: String = Utils.getCodeSourceLocation(getClass).split("kyuubi-server")(0) - private val markdown = Paths.get(kyuubiHome, "docs", "deployment", "settings.md") + private val markdown = Paths.get(kyuubiHome, "docs", "configuration", "settings.md") .toAbsolutePath private def loadConfigs = Array( @@ -62,165 +61,154 @@ class AllKyuubiConfiguration extends KyuubiFunSuite { val builder = MarkdownBuilder(licenced = true, getClass.getName) - builder - .lines(s""" - |# Introduction to the Kyuubi Configurations System + builder ++= + s""" + |# Configurations | |Kyuubi provides several ways to configure the system and corresponding engines. | |## Environments | - |""") - .line("""You can configure the environment variables in `$KYUUBI_HOME/conf/kyuubi-env.sh`, + |""" += + """You can configure the environment variables in `$KYUUBI_HOME/conf/kyuubi-env.sh`, | e.g, `JAVA_HOME`, then this java runtime will be used both for Kyuubi server instance and | the applications it launches. You can also change the variable in the subprocess's env | configuration file, e.g.`$SPARK_HOME/conf/spark-env.sh` to use more specific ENV for | SQL engine applications. see `$KYUUBI_HOME/conf/kyuubi-env.sh.template` as an example. - | """) - .line( - """ - | For the environment variables that only needed to be transferred into engine + | """ += + """ For the environment variables that only needed to be transferred into engine | side, you can set it with a Kyuubi configuration item formatted | `kyuubi.engineEnv.VAR_NAME`. For example, with `kyuubi.engineEnv.SPARK_DRIVER_MEMORY=4g`, | the environment variable `SPARK_DRIVER_MEMORY` with value `4g` would be transferred into | engine side. With `kyuubi.engineEnv.SPARK_CONF_DIR=/apache/confs/spark/conf`, the | value of `SPARK_CONF_DIR` on the engine side is set to `/apache/confs/spark/conf`. - | """) - .line("## Kyuubi Configurations") - .line(""" You can configure the Kyuubi properties in + | """ += "## Kyuubi Configurations" += + """ You can configure the Kyuubi properties in | `$KYUUBI_HOME/conf/kyuubi-defaults.conf`, see | `$KYUUBI_HOME/conf/kyuubi-defaults.conf.template` as an example. 
- | """) + | """ KyuubiConf.getConfigEntries().asScala .toStream .filterNot(_.internal) .groupBy(_.key.split("\\.")(1)) .toSeq.sortBy(_._1).foreach { case (category, entries) => - builder.lines( + builder ++= s"""### ${category.capitalize} | Key | Default | Meaning | Type | Since | --- | --- | --- | --- | --- - |""") + |""" entries.sortBy(_.key).foreach { c => val dft = c.defaultValStr.replace("<", "<").replace(">", ">") - builder.line(Seq( + builder += Seq( s"${c.key}", s"$dft", s"${c.doc}", s"${c.typ}", - s"${c.version}").mkString("|")) + s"${c.version}").mkString("|") } } - builder - .lines(""" - |## Spark Configurations + builder ++= + """## Spark Configurations |### Via spark-defaults.conf - |""") - .line(""" - | Setting them in `$SPARK_HOME/conf/spark-defaults.conf` + |""" += + """ Setting them in `$SPARK_HOME/conf/spark-defaults.conf` | supplies with default values for SQL engine application. Available properties can be | found at Spark official online documentation for | [Spark Configurations](https://spark.apache.org/docs/latest/configuration.html) - | """) - .line("### Via kyuubi-defaults.conf") - .line(""" - | Setting them in `$KYUUBI_HOME/conf/kyuubi-defaults.conf` + | """ += + "### Via kyuubi-defaults.conf" += + """ Setting them in `$KYUUBI_HOME/conf/kyuubi-defaults.conf` | supplies with default values for SQL engine application too. These properties will - | override all settings in `$SPARK_HOME/conf/spark-defaults.conf`""") - .line("### Via JDBC Connection URL") - .line(""" - | Setting them in the JDBC Connection URL + | override all settings in `$SPARK_HOME/conf/spark-defaults.conf`""" += + "### Via JDBC Connection URL" += + """ Setting them in the JDBC Connection URL | supplies session-specific for each SQL engine. For example: | ``` |jdbc:hive2://localhost:10009/default;# |spark.sql.shuffle.partitions=2;spark.executor.memory=5g - |```""") - .line() - .line("- **Runtime SQL Configuration**") - .line(""" - For [Runtime SQL Configurations]( + |``` + |""" += + "" += + "- **Runtime SQL Configuration**" += + """ - For [Runtime SQL Configurations]( |https://spark.apache.org/docs/latest/configuration.html#runtime-sql-configuration), they - | will take affect every time""") - .line("- **Static SQL and Spark Core Configuration**") - .line(""" - For [Static SQL Configurations]( + | will take affect every time""" += + "- **Static SQL and Spark Core Configuration**" += + """ - For [Static SQL Configurations]( |https://spark.apache.org/docs/latest/configuration.html#static-sql-configuration) and | other spark core configs, e.g. `spark.executor.memory`, they will take effect if there - | is no existing SQL engine application. Otherwise, they will just be ignored""") - .line("### Via SET Syntax") - .line("""Please refer to the Spark official online documentation for + | is no existing SQL engine application. Otherwise, they will just be ignored + | """ += + "### Via SET Syntax" += + """Please refer to the Spark official online documentation for | [SET Command](https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-set.html) - |""") + |""" - builder - .lines(""" - |## Flink Configurations - |### Via flink-conf.yaml""") - .line("""Setting them in `$FLINK_HOME/conf/flink-conf.yaml` + builder ++= + """## Flink Configurations + |### Via flink-conf.yaml""" += """Setting them in `$FLINK_HOME/conf/flink-conf.yaml` + | | supplies with default values for SQL engine application. 
| Available properties can be found at Flink official online documentation for | [Flink Configurations] - |(https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/)""") - .line("### Via kyuubi-defaults.conf") - .line("""Setting them in `$KYUUBI_HOME/conf/kyuubi-defaults.conf` + |(https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/)""" += + "### Via kyuubi-defaults.conf" += + """Setting them in `$KYUUBI_HOME/conf/kyuubi-defaults.conf` | supplies with default values for SQL engine application too. | You can use properties with the additional prefix `flink.` to override settings in - | `$FLINK_HOME/conf/flink-conf.yaml`.""") - .lines(""" - | + | `$FLINK_HOME/conf/flink-conf.yaml`.""" ++= + """ |For example: |``` |flink.parallelism.default 2 |flink.taskmanager.memory.process.size 5g - |```""") - .line("""The below options in `kyuubi-defaults.conf` will set `parallelism.default: 2` - | and `taskmanager.memory.process.size: 5g` into flink configurations.""") - .line("### Via JDBC Connection URL") - .line("""Setting them in the JDBC Connection URL supplies session-specific + |```""" += + """The below options in `kyuubi-defaults.conf` will set `parallelism.default: 2` + | and `taskmanager.memory.process.size: 5g` into flink configurations.""" += + "### Via JDBC Connection URL" += + """Setting them in the JDBC Connection URL supplies session-specific | for each SQL engine. For example: ```jdbc:hive2://localhost:10009/default; |#parallelism.default=2;taskmanager.memory.process.size=5g``` - |""") - .line("### Via SET Statements") - .line("""Please refer to the Flink official online documentation for [SET Statements] - |(https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/table/sql/set/)""") - - builder - .line("## Logging") - .line("""Kyuubi uses [log4j](https://logging.apache.org/log4j/2.x/) for logging. + |""" += + "### Via SET Statements" += + """Please refer to the Flink official online documentation for [SET Statements] + |(https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/table/sql/set/)""" + + builder += "## Logging" += + """Kyuubi uses [log4j](https://logging.apache.org/log4j/2.x/) for logging. | You can configure it using `$KYUUBI_HOME/conf/log4j2.xml`, see | `$KYUUBI_HOME/conf/log4j2.xml.template` as an example. - | """) + | """ - builder - .lines(""" - |## Other Configurations + builder ++= + """## Other Configurations |### Hadoop Configurations - |""") - .line("""Specifying `HADOOP_CONF_DIR` to the directory containing Hadoop configuration + |""" += + """Specifying `HADOOP_CONF_DIR` to the directory containing Hadoop configuration | files or treating them as Spark properties with a `spark.hadoop.` prefix. | Please refer to the Spark official online documentation for | [Inheriting Hadoop Cluster Configuration](https://spark.apache.org/docs/latest/ |configuration.html#inheriting-hadoop-cluster-configuration). | Also, please refer to the [Apache Hadoop](https://hadoop.apache.org)'s - | online documentation for an overview on how to configure Hadoop.""") - .line("### Hive Configurations") - .line("""These configurations are used for SQL engine application to talk to + | online documentation for an overview on how to configure Hadoop.""" += + "### Hive Configurations" += + """These configurations are used for SQL engine application to talk to | Hive MetaStore and could be configured in a `hive-site.xml`. 
| Placed it in `$SPARK_HOME/conf` directory, or treat them as Spark properties with - | a `spark.hadoop.` prefix.""") + | a `spark.hadoop.` prefix.""" - builder - .line("## User Defaults") - .line("""In Kyuubi, we can configure user default settings to meet separate needs. + builder += "## User Defaults" += + """In Kyuubi, we can configure user default settings to meet separate needs. | These user defaults override system defaults, but will be overridden by those from | [JDBC Connection URL](#via-jdbc-connection-url) or [Set Command](#via-set-syntax) - | if could be. They will take effect when creating the SQL engine application ONLY.""") - .line("""User default settings are in the form of `___{username}___.{config key}`. + | if could be. They will take effect when creating the SQL engine application ONLY.""" += + """User default settings are in the form of `___{username}___.{config key}`. | There are three continuous underscores(`_`) at both sides of the `username` and - | a dot(`.`) that separates the config key and the prefix. For example:""") - .lines(""" - |```bash + | a dot(`.`) that separates the config key and the prefix. For example:""" ++= + """```bash |# For system defaults |spark.master=local |spark.sql.adaptive.enabled=true @@ -232,14 +220,14 @@ class AllKyuubiConfiguration extends KyuubiFunSuite { |___bob___.spark.executor.memory=8g |``` | - |""") - .line("""In the above case, if there are related configurations from + |""" += + """In the above case, if there are related configurations from | [JDBC Connection URL](#via-jdbc-connection-url), `kent` will run his SQL engine | application on YARN and prefer the Spark AQE to be off, while `bob` will activate | his SQL engine application on a Spark standalone cluster with 8g heap memory for each | executor and obey the Spark AQE behavior of Kyuubi system default. 
On the other hand, - | for those users who do not have custom configurations will use system defaults.""") + | for those users who do not have custom configurations will use system defaults.""" - MarkdownUtils.verifyOutput(markdown, builder, getClass.getCanonicalName, "kyuubi-server") + verifyOrRegenerateGoldenFile(markdown, builder.toMarkdown, "dev/gen/gen_all_config_docs.sh") } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/credentials/HiveDelegationTokenProviderSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/credentials/HiveDelegationTokenProviderSuite.scala index c3977e80799..6c0370f5530 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/credentials/HiveDelegationTokenProviderSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/credentials/HiveDelegationTokenProviderSuite.scala @@ -37,6 +37,7 @@ import org.apache.hadoop.security.{Credentials, UserGroupInformation} import org.apache.hadoop.security.authorize.ProxyUsers import org.apache.thrift.TProcessor import org.apache.thrift.protocol.TProtocol +import org.scalatest.Assertions._ import org.scalatest.concurrent.Eventually._ import org.scalatest.time.SpanSugar.convertIntToGrainOfTime diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefTests.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefTests.scala index 5ca8723f5bd..08b36b84a73 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefTests.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefTests.scala @@ -20,6 +20,8 @@ package org.apache.kyuubi.engine import java.util.UUID import java.util.concurrent.Executors +import scala.collection.JavaConverters._ + import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.{KYUUBI_VERSION, Utils} @@ -33,6 +35,7 @@ import org.apache.kyuubi.ha.client.DiscoveryClientProvider import org.apache.kyuubi.ha.client.DiscoveryPaths import org.apache.kyuubi.metrics.MetricsConstants.ENGINE_TOTAL import org.apache.kyuubi.metrics.MetricsSystem +import org.apache.kyuubi.plugin.PluginLoader import org.apache.kyuubi.util.NamedThreadFactory trait EngineRefTests extends KyuubiFunSuite { @@ -68,7 +71,9 @@ trait EngineRefTests extends KyuubiFunSuite { Seq(None, Some("suffix")).foreach { domain => conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, CONNECTION.toString) domain.foreach(conf.set(KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key, _)) - val engine = new EngineRef(conf, user, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${CONNECTION}_${engineType}", @@ -82,7 +87,9 @@ trait EngineRefTests extends KyuubiFunSuite { val id = UUID.randomUUID().toString conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, USER.toString) conf.set(KyuubiConf.ENGINE_TYPE, FLINK_SQL.toString) - val appName = new EngineRef(conf, user, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val appName = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(appName.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${USER}_$FLINK_SQL", @@ -94,7 +101,7 @@ trait EngineRefTests extends KyuubiFunSuite { k => conf.unset(KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN) conf.set(k.key, "abc") - val appName2 = new EngineRef(conf, user, "grp", id, null) + val appName2 = new EngineRef(conf, user, 
PluginLoader.loadGroupProvider(conf), id, null) assert(appName2.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${USER}_${FLINK_SQL}", @@ -108,8 +115,12 @@ trait EngineRefTests extends KyuubiFunSuite { val id = UUID.randomUUID().toString conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, GROUP.toString) conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) - val primaryGroupName = "primary_grp" - val engineRef = new EngineRef(conf, user, primaryGroupName, id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val primaryGroupName = + PluginLoader.loadGroupProvider(conf).primaryGroup(user, Map.empty[String, String].asJava) + + val engineRef = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engineRef.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_GROUP_SPARK_SQL", @@ -122,7 +133,7 @@ trait EngineRefTests extends KyuubiFunSuite { k => conf.unset(k) conf.set(k.key, "abc") - val engineRef2 = new EngineRef(conf, user, primaryGroupName, id, null) + val engineRef2 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engineRef2.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${GROUP}_${SPARK_SQL}", @@ -137,7 +148,9 @@ trait EngineRefTests extends KyuubiFunSuite { val id = UUID.randomUUID().toString conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, SERVER.toString) conf.set(KyuubiConf.ENGINE_TYPE, FLINK_SQL.toString) - val appName = new EngineRef(conf, user, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val appName = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(appName.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${SERVER}_${FLINK_SQL}", @@ -146,7 +159,7 @@ trait EngineRefTests extends KyuubiFunSuite { assert(appName.defaultEngineName === s"kyuubi_${SERVER}_${FLINK_SQL}_${user}_default_$id") conf.set(KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key, "abc") - val appName2 = new EngineRef(conf, user, "grp", id, null) + val appName2 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(appName2.engineSpace === DiscoveryPaths.makePath( s"kyuubi_${KYUUBI_VERSION}_${SERVER}_${FLINK_SQL}", @@ -161,31 +174,33 @@ trait EngineRefTests extends KyuubiFunSuite { // set subdomain and disable engine pool conf.set(ENGINE_SHARE_LEVEL_SUBDOMAIN.key, "abc") conf.set(ENGINE_POOL_SIZE, -1) - val engine1 = new EngineRef(conf, user, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine1 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine1.subdomain === "abc") // unset subdomain and disable engine pool conf.unset(ENGINE_SHARE_LEVEL_SUBDOMAIN) conf.set(ENGINE_POOL_SIZE, -1) - val engine2 = new EngineRef(conf, user, "grp", id, null) + val engine2 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine2.subdomain === "default") // set subdomain and 1 <= engine pool size < threshold conf.set(ENGINE_SHARE_LEVEL_SUBDOMAIN.key, "abc") conf.set(ENGINE_POOL_SIZE, 1) - val engine3 = new EngineRef(conf, user, "grp", id, null) + val engine3 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine3.subdomain === "abc") // unset subdomain and 1 <= engine pool size < threshold conf.unset(ENGINE_SHARE_LEVEL_SUBDOMAIN) conf.set(ENGINE_POOL_SIZE, 3) - val engine4 = new EngineRef(conf, user, "grp", id, null) + val engine4 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) 
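The recurring change throughout this suite swaps the hard-coded `"grp"` group argument for a provider obtained via `PluginLoader.loadGroupProvider(conf)` after setting `KyuubiConf.GROUP_PROVIDER` to `hadoop`. The following is a minimal sketch, not part of the patch, showing how a test (or the server) resolves a user's primary group through that plugin; it uses only the calls that appear in the diff above, and the object and method names are illustrative.

```scala
// Sketch only, assuming the kyuubi-server test classpath; mirrors the calls used above.
import scala.collection.JavaConverters._

import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.plugin.PluginLoader

object GroupProviderSketch {
  def primaryGroupOf(user: String): String = {
    val conf = KyuubiConf()
    // "hadoop" selects the Hadoop group-mapping based provider, as in the tests above.
    conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop")
    val provider = PluginLoader.loadGroupProvider(conf)
    // EngineRef now receives this provider instead of a pre-computed group string.
    provider.primaryGroup(user, Map.empty[String, String].asJava)
  }
}
```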
assert(engine4.subdomain.startsWith("engine-pool-")) // unset subdomain and engine pool size > threshold conf.unset(ENGINE_SHARE_LEVEL_SUBDOMAIN) conf.set(ENGINE_POOL_SIZE, 100) - val engine5 = new EngineRef(conf, user, "grp", id, null) + val engine5 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) val engineNumber = Integer.parseInt(engine5.subdomain.substring(12)) val threshold = ENGINE_POOL_SIZE_THRESHOLD.defaultVal.get assert(engineNumber <= threshold) @@ -195,7 +210,7 @@ trait EngineRefTests extends KyuubiFunSuite { val enginePoolName = "test-pool" conf.set(ENGINE_POOL_NAME, enginePoolName) conf.set(ENGINE_POOL_SIZE, 3) - val engine6 = new EngineRef(conf, user, "grp", id, null) + val engine6 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine6.subdomain.startsWith(s"$enginePoolName-")) conf.unset(ENGINE_SHARE_LEVEL_SUBDOMAIN) @@ -205,8 +220,8 @@ trait EngineRefTests extends KyuubiFunSuite { conf.set(HighAvailabilityConf.HA_NAMESPACE, "engine_test") conf.set(HighAvailabilityConf.HA_ADDRESSES, getConnectString()) conf.set(ENGINE_POOL_SELECT_POLICY, "POLLING") - (0 until (10)).foreach { i => - val engine7 = new EngineRef(conf, user, "grp", id, null) + (0 until 10).foreach { i => + val engine7 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) val engineNumber = Integer.parseInt(engine7.subdomain.substring(pool_name.length + 1)) assert(engineNumber == (i % conf.get(ENGINE_POOL_SIZE))) } @@ -219,7 +234,9 @@ trait EngineRefTests extends KyuubiFunSuite { conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) conf.set(HighAvailabilityConf.HA_NAMESPACE, "engine_test") conf.set(HighAvailabilityConf.HA_ADDRESSES, getConnectString()) - val engine = new EngineRef(conf, user, id, "grp", null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) var port1 = 0 var port2 = 0 @@ -261,18 +278,24 @@ trait EngineRefTests extends KyuubiFunSuite { conf.set(KyuubiConf.ENGINE_INIT_TIMEOUT, 3000L) conf.set(HighAvailabilityConf.HA_NAMESPACE, "engine_test2") conf.set(HighAvailabilityConf.HA_ADDRESSES, getConnectString()) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") val beforeEngines = MetricsSystem.counterValue(ENGINE_TOTAL).getOrElse(0L) val start = System.currentTimeMillis() val times = new Array[Long](3) val executor = Executors.newFixedThreadPool(3) try { - (0 until (3)).foreach { i => + (0 until 3).foreach { i => val cloned = conf.clone executor.execute(() => { DiscoveryClientProvider.withDiscoveryClient(cloned) { client => try { - new EngineRef(cloned, user, "grp", id, null).getOrCreate(client) + new EngineRef( + cloned, + user, + PluginLoader.loadGroupProvider(conf), + id, + null).getOrCreate(client) } finally { times(i) = System.currentTimeMillis() } @@ -300,20 +323,22 @@ trait EngineRefTests extends KyuubiFunSuite { conf.set(ENGINE_SHARE_LEVEL_SUBDOMAIN.key, "abc") conf.set(ENGINE_POOL_IGNORE_SUBDOMAIN, false) conf.set(ENGINE_POOL_SIZE, -1) - val engine1 = new EngineRef(conf, user, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine1 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine1.subdomain === "abc") conf.set(ENGINE_POOL_SIZE, 1) - val engine2 = new EngineRef(conf, user, "grp", id, null) + val engine2 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine2.subdomain === "abc") conf.unset(ENGINE_SHARE_LEVEL_SUBDOMAIN) - 
val engine3 = new EngineRef(conf, user, "grp", id, null) + val engine3 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine3.subdomain.startsWith("engine-pool-")) conf.set(ENGINE_SHARE_LEVEL_SUBDOMAIN.key, "abc") conf.set(ENGINE_POOL_IGNORE_SUBDOMAIN, true) - val engine4 = new EngineRef(conf, user, "grp", id, null) + val engine4 = new EngineRef(conf, user, PluginLoader.loadGroupProvider(conf), id, null) assert(engine4.subdomain.startsWith("engine-pool-")) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefWithZookeeperSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefWithZookeeperSuite.scala index 8695e13c414..40fc818706c 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefWithZookeeperSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/EngineRefWithZookeeperSuite.scala @@ -29,6 +29,7 @@ import org.apache.kyuubi.engine.EngineType.SPARK_SQL import org.apache.kyuubi.engine.ShareLevel.USER import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.ha.client.DiscoveryClientProvider +import org.apache.kyuubi.plugin.PluginLoader import org.apache.kyuubi.zookeeper.EmbeddedZookeeper import org.apache.kyuubi.zookeeper.ZookeeperConf @@ -62,6 +63,8 @@ class EngineRefWithZookeeperSuite extends EngineRefTests { conf.set(KyuubiConf.ENGINE_INIT_TIMEOUT, 3000L) conf.set(HighAvailabilityConf.HA_NAMESPACE, "engine_test1") conf.set(HighAvailabilityConf.HA_ADDRESSES, getConnectString()) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + val conf1 = conf.clone conf1.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) val conf2 = conf.clone @@ -74,7 +77,12 @@ class EngineRefWithZookeeperSuite extends EngineRefTests { executor.execute(() => { DiscoveryClientProvider.withDiscoveryClient(conf1) { client => try { - new EngineRef(conf1, user, "grp", UUID.randomUUID().toString, null) + new EngineRef( + conf1, + user, + PluginLoader.loadGroupProvider(conf), + UUID.randomUUID().toString, + null) .getOrCreate(client) } finally { times(0) = System.currentTimeMillis() @@ -84,7 +92,12 @@ class EngineRefWithZookeeperSuite extends EngineRefTests { executor.execute(() => { DiscoveryClientProvider.withDiscoveryClient(conf2) { client => try { - new EngineRef(conf2, user, "grp", UUID.randomUUID().toString, null) + new EngineRef( + conf2, + user, + PluginLoader.loadGroupProvider(conf), + UUID.randomUUID().toString, + null) .getOrCreate(client) } finally { times(1) = System.currentTimeMillis() diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/JpsApplicationOperationSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/JpsApplicationOperationSuite.scala index 22e7119639a..a0914afcf0d 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/JpsApplicationOperationSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/JpsApplicationOperationSuite.scala @@ -19,9 +19,8 @@ package org.apache.kyuubi.engine import java.lang.management.ManagementFactory import java.time.Duration -import java.util.{ServiceLoader, UUID} +import java.util.UUID -import scala.collection.JavaConverters._ import scala.sys.process._ import org.scalatest.concurrent.PatienceConfiguration.Timeout @@ -31,18 +30,18 @@ import org.apache.kyuubi.{KyuubiFunSuite, Utils} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.SESSION_IDLE_TIMEOUT import org.apache.kyuubi.engine.spark.SparkProcessBuilder +import 
org.apache.kyuubi.util.reflect.ReflectUtils._ class JpsApplicationOperationSuite extends KyuubiFunSuite { - private val operations = ServiceLoader.load(classOf[ApplicationOperation]) - .asScala.filter(_.getClass.isAssignableFrom(classOf[JpsApplicationOperation])) - private val jps = operations.head + private val jps = loadFromServiceLoader[ApplicationOperation]() + .find(_.getClass.isAssignableFrom(classOf[JpsApplicationOperation])).get jps.initialize(null) test("JpsApplicationOperation with jstat") { - assert(jps.isSupported(None)) - assert(jps.isSupported(Some("local"))) - assert(!jps.killApplicationByTag(null)._1) - assert(!jps.killApplicationByTag("have a space")._1) + assert(jps.isSupported(ApplicationManagerInfo(None))) + assert(jps.isSupported(ApplicationManagerInfo(Some("local")))) + assert(!jps.killApplicationByTag(ApplicationManagerInfo(None), null)._1) + assert(!jps.killApplicationByTag(ApplicationManagerInfo(None), "have a space")._1) val currentProcess = ManagementFactory.getRuntimeMXBean.getName val currentPid = currentProcess.splitAt(currentProcess.indexOf("@"))._1 @@ -53,16 +52,16 @@ class JpsApplicationOperationSuite extends KyuubiFunSuite { }.start() eventually(Timeout(10.seconds)) { - val desc1 = jps.getApplicationInfoByTag("sun.tools.jstat.Jstat") + val desc1 = jps.getApplicationInfoByTag(ApplicationManagerInfo(None), "sun.tools.jstat.Jstat") assert(desc1.id != null) assert(desc1.name != null) assert(desc1.state == ApplicationState.RUNNING) } - jps.killApplicationByTag("sun.tools.jstat.Jstat") + jps.killApplicationByTag(ApplicationManagerInfo(None), "sun.tools.jstat.Jstat") eventually(Timeout(10.seconds)) { - val desc2 = jps.getApplicationInfoByTag("sun.tools.jstat.Jstat") + val desc2 = jps.getApplicationInfoByTag(ApplicationManagerInfo(None), "sun.tools.jstat.Jstat") assert(desc2.id == null) assert(desc2.name == null) assert(desc2.state == ApplicationState.NOT_FOUND) @@ -79,25 +78,25 @@ class JpsApplicationOperationSuite extends KyuubiFunSuite { val builder = new SparkProcessBuilder(user, conf) builder.start - assert(jps.isSupported(builder.clusterManager())) + assert(jps.isSupported(ApplicationManagerInfo(builder.clusterManager()))) eventually(Timeout(10.seconds)) { - val desc1 = jps.getApplicationInfoByTag(id) + val desc1 = jps.getApplicationInfoByTag(ApplicationManagerInfo(None), id) assert(desc1.id != null) assert(desc1.name != null) assert(desc1.state == ApplicationState.RUNNING) - val response = jps.killApplicationByTag(id) + val response = jps.killApplicationByTag(ApplicationManagerInfo(None), id) assert(response._1, response._2) assert(response._2 startsWith "Succeeded to terminate:") } eventually(Timeout(10.seconds)) { - val desc2 = jps.getApplicationInfoByTag(id) + val desc2 = jps.getApplicationInfoByTag(ApplicationManagerInfo(None), id) assert(desc2.id == null) assert(desc2.name == null) assert(desc2.state == ApplicationState.NOT_FOUND) } - val response2 = jps.killApplicationByTag(id) + val response2 = jps.killApplicationByTag(ApplicationManagerInfo(None), id) assert(!response2._1) assert(response2._2 === ApplicationOperation.NOT_FOUND) } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KubernetesApplicationOperationSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KubernetesApplicationOperationSuite.scala new file mode 100644 index 00000000000..2ea1939d2fc --- /dev/null +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KubernetesApplicationOperationSuite.scala @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.engine + +import org.apache.kyuubi.{KyuubiException, KyuubiFunSuite} +import org.apache.kyuubi.config.KyuubiConf + +class KubernetesApplicationOperationSuite extends KyuubiFunSuite { + + test("test check kubernetes info") { + val kyuubiConf = KyuubiConf() + kyuubiConf.set(KyuubiConf.KUBERNETES_CONTEXT_ALLOW_LIST.key, "1,2") + kyuubiConf.set(KyuubiConf.KUBERNETES_NAMESPACE_ALLOW_LIST.key, "ns1,ns2") + + val operation = new KubernetesApplicationOperation() + operation.initialize(kyuubiConf) + + operation.checkKubernetesInfo(KubernetesInfo(None, None)) + operation.checkKubernetesInfo(KubernetesInfo(Some("1"), None)) + operation.checkKubernetesInfo(KubernetesInfo(Some("2"), None)) + operation.checkKubernetesInfo(KubernetesInfo(Some("1"), Some("ns1"))) + operation.checkKubernetesInfo(KubernetesInfo(Some("1"), Some("ns2"))) + operation.checkKubernetesInfo(KubernetesInfo(Some("2"), Some("ns1"))) + operation.checkKubernetesInfo(KubernetesInfo(Some("2"), Some("ns2"))) + + intercept[KyuubiException] { + operation.checkKubernetesInfo(KubernetesInfo(Some("3"), Some("ns1"))) + } + intercept[KyuubiException] { + operation.checkKubernetesInfo(KubernetesInfo(Some("1"), Some("ns3"))) + } + intercept[KyuubiException] { + operation.checkKubernetesInfo(KubernetesInfo(Some("3"), None)) + } + intercept[KyuubiException] { + operation.checkKubernetesInfo(KubernetesInfo(None, Some("ns3"))) + } + + kyuubiConf.unset(KyuubiConf.KUBERNETES_CONTEXT_ALLOW_LIST.key) + operation.checkKubernetesInfo(KubernetesInfo(Some("3"), None)) + kyuubiConf.unset(KyuubiConf.KUBERNETES_NAMESPACE_ALLOW_LIST.key) + operation.checkKubernetesInfo(KubernetesInfo(None, Some("ns3"))) + } +} diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KyuubiApplicationManagerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KyuubiApplicationManagerSuite.scala index b01f82a247a..0f54520fc77 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KyuubiApplicationManagerSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/KyuubiApplicationManagerSuite.scala @@ -24,7 +24,7 @@ import org.apache.kyuubi.engine.KubernetesApplicationOperation.LABEL_KYUUBI_UNIQ class KyuubiApplicationManagerSuite extends KyuubiFunSuite { test("application access path") { val localDirLimitConf = KyuubiConf() - .set(KyuubiConf.SESSION_LOCAL_DIR_ALLOW_LIST, Seq("/apache/kyuubi")) + .set(KyuubiConf.SESSION_LOCAL_DIR_ALLOW_LIST, Set("/apache/kyuubi")) val noLocalDirLimitConf = KyuubiConf() var path = "/apache/kyuubi/a.jar" diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala index 
7ee38d4ef99..26e355a87bd 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala @@ -18,6 +18,7 @@ package org.apache.kyuubi.engine.flink import java.io.File +import java.nio.file.{Files, Paths} import scala.collection.JavaConverters._ import scala.collection.immutable.ListMap @@ -25,30 +26,58 @@ import scala.util.matching.Regex import org.apache.kyuubi.KyuubiFunSuite import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_EXTRA_CLASSPATH, ENGINE_FLINK_JAVA_OPTIONS, ENGINE_FLINK_MEMORY} +import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_APPLICATION_JARS, ENGINE_FLINK_EXTRA_CLASSPATH, ENGINE_FLINK_JAVA_OPTIONS, ENGINE_FLINK_MEMORY} +import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_ENGINE_CREDENTIALS_KEY import org.apache.kyuubi.engine.flink.FlinkProcessBuilder._ class FlinkProcessBuilderSuite extends KyuubiFunSuite { - private def conf = KyuubiConf().set("kyuubi.on", "off") + private def sessionModeConf = KyuubiConf() + .set("flink.execution.target", "yarn-session") + .set("kyuubi.on", "off") .set(ENGINE_FLINK_MEMORY, "512m") .set( ENGINE_FLINK_JAVA_OPTIONS, "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005") + .set(KYUUBI_ENGINE_CREDENTIALS_KEY, "should-not-be-used") + + private def applicationModeConf = KyuubiConf() + .set("flink.execution.target", "yarn-application") + .set(ENGINE_FLINK_APPLICATION_JARS, tempUdfJar.toString) + .set(APP_KEY, "kyuubi_connection_flink_paul") + .set("kyuubi.on", "off") + .set(KYUUBI_ENGINE_CREDENTIALS_KEY, "should-not-be-used") + + private val tempFlinkHome = Files.createTempDirectory("flink-home").toFile + private val tempOpt = + Files.createDirectories(Paths.get(tempFlinkHome.toPath.toString, "opt")).toFile + Files.createFile(Paths.get(tempOpt.toPath.toString, "flink-sql-client-1.16.1.jar")) + Files.createFile(Paths.get(tempOpt.toPath.toString, "flink-sql-gateway-1.16.1.jar")) + private val tempUsrLib = + Files.createDirectories(Paths.get(tempFlinkHome.toPath.toString, "usrlib")).toFile + private val tempUdfJar = + Files.createFile(Paths.get(tempUsrLib.toPath.toString, "test-udf.jar")) + private val tempHiveDir = + Files.createDirectories(Paths.get(tempFlinkHome.toPath.toString, "hive-conf")).toFile + Files.createFile(Paths.get(tempHiveDir.toPath.toString, "hive-site.xml")) private def envDefault: ListMap[String, String] = ListMap( - "JAVA_HOME" -> s"${File.separator}jdk") + "JAVA_HOME" -> s"${File.separator}jdk", + "FLINK_HOME" -> s"${tempFlinkHome.toPath}") private def envWithoutHadoopCLASSPATH: ListMap[String, String] = envDefault + ("HADOOP_CONF_DIR" -> s"${File.separator}hadoop${File.separator}conf") + ("YARN_CONF_DIR" -> s"${File.separator}yarn${File.separator}conf") + - ("HBASE_CONF_DIR" -> s"${File.separator}hbase${File.separator}conf") + ("HBASE_CONF_DIR" -> s"${File.separator}hbase${File.separator}conf") + + ("HIVE_CONF_DIR" -> s"$tempHiveDir") private def envWithAllHadoop: ListMap[String, String] = envWithoutHadoopCLASSPATH + (FLINK_HADOOP_CLASSPATH_KEY -> s"${File.separator}hadoop") private def confStr: String = { - conf.clone.set("yarn.tags", "KYUUBI").getAll + sessionModeConf.clone.getAll + .filter(!_._1.equals(KYUUBI_ENGINE_CREDENTIALS_KEY)) .map { case (k, v) => s"\\\\\\n\\t--conf $k=$v" } .mkString(" ") } - private def matchActualAndExpected(builder: FlinkProcessBuilder): Unit = { + + private def 
matchActualAndExpectedSessionMode(builder: FlinkProcessBuilder): Unit = { val actualCommands = builder.toString val classpathStr = constructClasspathStr(builder) val expectedCommands = @@ -59,21 +88,46 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite { assert(matcher.matches()) } + private def matchActualAndExpectedApplicationMode(builder: FlinkProcessBuilder): Unit = { + val actualCommands = builder.toString + val expectedCommands = + escapePaths(s"${builder.flinkExecutable} run-application ") + + s"-t yarn-application " + + s"-Dyarn.ship-files=.*\\/flink-sql-client.*jar;.*\\/flink-sql-gateway.*jar;$tempUdfJar" + + s";.*\\/hive-site\\.xml " + + s"-Dyarn\\.application\\.name=kyuubi_.* " + + s"-Dyarn\\.tags=KYUUBI " + + s"-Dcontainerized\\.master\\.env\\.FLINK_CONF_DIR=\\. " + + s"-Dcontainerized\\.master\\.env\\.HIVE_CONF_DIR=\\. " + + s"-Dexecution.target=yarn-application " + + s"-c org\\.apache\\.kyuubi\\.engine\\.flink\\.FlinkSQLEngine " + + s".*kyuubi-flink-sql-engine_.*jar" + + s"(?: \\\\\\n\\t--conf \\S+=\\S+)+" + val regex = new Regex(expectedCommands) + val matcher = regex.pattern.matcher(actualCommands) + assert(matcher.matches()) + } + + private def escapePaths(path: String): String = { + path.replaceAll("/", "\\/") + } + private def constructClasspathStr(builder: FlinkProcessBuilder) = { val classpathEntries = new java.util.LinkedHashSet[String] builder.mainResource.foreach(classpathEntries.add) val flinkHome = builder.flinkHome classpathEntries.add(s"$flinkHome$flinkSqlClientJarPathSuffixRegex") + classpathEntries.add(s"$flinkHome$flinkSqlGatewayJarPathSuffixRegex") classpathEntries.add(s"$flinkHome$flinkLibPathSuffixRegex") classpathEntries.add(s"$flinkHome$flinkConfPathSuffix") val envMap = builder.env envMap.foreach { case (k, v) => - if (!k.equals("JAVA_HOME")) { + if (!k.equals("JAVA_HOME") && !k.equals("FLINK_HOME")) { classpathEntries.add(v) } } - val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH) + val extraCp = sessionModeConf.get(ENGINE_FLINK_EXTRA_CLASSPATH) extraCp.foreach(classpathEntries.add) val classpathStr = classpathEntries.asScala.mkString(File.pathSeparator) classpathStr @@ -82,22 +136,31 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite { private val javaPath = s"${envDefault("JAVA_HOME")}${File.separator}bin${File.separator}java" private val flinkSqlClientJarPathSuffixRegex = s"${File.separator}opt${File.separator}" + s"flink-sql-client-.*.jar" + private val flinkSqlGatewayJarPathSuffixRegex = s"${File.separator}opt${File.separator}" + + s"flink-sql-gateway-.*.jar" private val flinkLibPathSuffixRegex = s"${File.separator}lib${File.separator}\\*" private val flinkConfPathSuffix = s"${File.separator}conf" private val mainClassStr = "org.apache.kyuubi.engine.flink.FlinkSQLEngine" - test("all hadoop related environment variables are configured") { - val builder = new FlinkProcessBuilder("vinoyang", conf) { + test("session mode - all hadoop related environment variables are configured") { + val builder = new FlinkProcessBuilder("vinoyang", sessionModeConf) { override def env: Map[String, String] = envWithAllHadoop } - matchActualAndExpected(builder) + matchActualAndExpectedSessionMode(builder) } - test("only FLINK_HADOOP_CLASSPATH environment variables are configured") { - val builder = new FlinkProcessBuilder("vinoyang", conf) { + test("session mode - only FLINK_HADOOP_CLASSPATH environment variables are configured") { + val builder = new FlinkProcessBuilder("vinoyang", sessionModeConf) { override def env: Map[String, String] = envDefault + 
(FLINK_HADOOP_CLASSPATH_KEY -> s"${File.separator}hadoop") } - matchActualAndExpected(builder) + matchActualAndExpectedSessionMode(builder) + } + + test("application mode - all hadoop related environment variables are configured") { + val builder = new FlinkProcessBuilder("paullam", applicationModeConf) { + override def env: Map[String, String] = envWithAllHadoop + } + matchActualAndExpectedApplicationMode(builder) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/PySparkTests.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/PySparkTests.scala index 6af7e21e25d..16a7f728ea6 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/PySparkTests.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/PySparkTests.scala @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.kyuubi.engine.spark.operation +package org.apache.kyuubi.engine.spark import java.io.PrintWriter import java.nio.file.Files @@ -158,16 +158,16 @@ class PySparkTests extends WithKyuubiServer with HiveJDBCTestHelper { } } - private def withTempPyFile(code: String)(op: (String) => Unit): Unit = { + private def withTempPyFile(code: String)(op: String => Unit): Unit = { val tempPyFile = Files.createTempFile("", ".py").toFile try { new PrintWriter(tempPyFile) { write(code) - close + close() } op(tempPyFile.getPath) } finally { - Files.delete(tempPyFile.toPath) + Files.deleteIfExists(tempPyFile.toPath) } } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilderSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilderSuite.scala index 7b204dafb9a..a4227d26e74 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilderSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/spark/SparkProcessBuilderSuite.scala @@ -29,6 +29,8 @@ import org.scalatestplus.mockito.MockitoSugar import org.apache.kyuubi.{KerberizedTestHelper, KyuubiSQLException, Utils} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.{ENGINE_LOG_TIMEOUT, ENGINE_SPARK_MAIN_RESOURCE} +import org.apache.kyuubi.engine.ProcBuilder.KYUUBI_ENGINE_LOG_PATH_KEY +import org.apache.kyuubi.engine.spark.SparkProcessBuilder._ import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.ha.client.AuthTypes import org.apache.kyuubi.service.ServiceUtils @@ -141,7 +143,7 @@ class SparkProcessBuilderSuite extends KerberizedTestHelper with MockitoSugar { assert(!process.logCaptureThreadReleased) subProcess.waitFor(3, TimeUnit.SECONDS) } finally { - process.close() + process.close(true) } eventually(timeout(3.seconds), interval(100.milliseconds)) { assert(process.logCaptureThreadReleased) @@ -165,17 +167,15 @@ class SparkProcessBuilderSuite extends KerberizedTestHelper with MockitoSugar { val config = KyuubiConf().set(KyuubiConf.ENGINE_LOG_TIMEOUT, 20000L) (1 to 10).foreach { _ => - pool.execute(new Runnable { - override def run(): Unit = { - val pb = new FakeSparkProcessBuilder(config) { - override val workingDir: Path = fakeWorkDir - } - try { - val p = pb.start - p.waitFor() - } finally { - pb.close() - } + pool.execute(() => { + val pb = new FakeSparkProcessBuilder(config) { + override val workingDir: Path = fakeWorkDir + } + try { + val p = pb.start + p.waitFor() + } finally { + pb.close(true) } }) } @@ -298,9 +298,74 @@ class SparkProcessBuilderSuite extends KerberizedTestHelper with MockitoSugar { 
assert(!c3.contains(s"spark.kubernetes.driverEnv.SPARK_USER_NAME=$proxyName")) assert(!c3.contains(s"spark.executorEnv.SPARK_USER_NAME=$proxyName")) } + + test("[KYUUBI #5009] Test pass spark engine log path to spark conf") { + val b1 = new SparkProcessBuilder("kyuubi", conf) + assert( + b1.toString.contains( + s"$CONF spark.$KYUUBI_ENGINE_LOG_PATH_KEY=${b1.engineLog.getAbsolutePath}")) + } + + test("[KYUUBI #5165] Test SparkProcessBuilder#appendDriverPodPrefix") { + val engineRefId = "kyuubi-test-engine" + val appName = "test-app" + val processBuilder = new SparkProcessBuilder( + "kyuubi", + conf.set(MASTER_KEY, "k8s://internal").set(DEPLOY_MODE_KEY, "cluster"), + engineRefId) + val conf1 = Map(APP_KEY -> "test-app") + val driverPodName1 = processBuilder.appendPodNameConf(conf1).get(KUBERNETES_DRIVER_POD_NAME) + assert(driverPodName1 === Some(s"kyuubi-$appName-$engineRefId-driver")) + // respect user specified driver pod name + val conf2 = conf1 ++ Map(KUBERNETES_DRIVER_POD_NAME -> "kyuubi-test-1-driver") + val driverPodName2 = processBuilder.appendPodNameConf(conf2).get(KUBERNETES_DRIVER_POD_NAME) + assert(driverPodName2 === None) + val longAppName = "thisisalonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglongappname" + val conf3 = Map(APP_KEY -> longAppName) + val driverPodName3 = processBuilder.appendPodNameConf(conf3).get(KUBERNETES_DRIVER_POD_NAME) + assert(driverPodName3 === Some(s"kyuubi-$engineRefId-driver")) + // scalastyle:off + val chineseAppName = "你好_test_任务" + // scalastyle:on + val conf4 = Map(APP_KEY -> chineseAppName) + val driverPodName4 = processBuilder.appendPodNameConf(conf4).get(KUBERNETES_DRIVER_POD_NAME) + assert(driverPodName4 === Some(s"kyuubi-test-$engineRefId-driver")) + } + + test("[KYUUBI #5165] Test SparkProcessBuilder#appendExecutorPodPrefix") { + val engineRefId = "kyuubi-test-engine" + val appName = "test-app" + val processBuilder = new SparkProcessBuilder( + "kyuubi", + conf.set(MASTER_KEY, "k8s://internal").set(DEPLOY_MODE_KEY, "cluster"), + engineRefId) + val conf1 = Map(APP_KEY -> "test-app") + val execPodNamePrefix1 = processBuilder + .appendPodNameConf(conf1).get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX) + assert(execPodNamePrefix1 === Some(s"kyuubi-$appName-$engineRefId")) + val conf2 = conf1 ++ Map(KUBERNETES_EXECUTOR_POD_NAME_PREFIX -> "kyuubi-test") + val execPodNamePrefix2 = processBuilder + .appendPodNameConf(conf2).get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX) + assert(execPodNamePrefix2 === None) + val longAppName = "thisisalonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglonglong" + + "longlonglonglonglonglonglonglonglonglonglonglonglongappname" + val conf3 = Map(APP_KEY -> longAppName) + val execPodNamePrefix3 = processBuilder + .appendPodNameConf(conf3).get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX) + assert(execPodNamePrefix3 === Some(s"kyuubi-$engineRefId")) + } } class FakeSparkProcessBuilder(config: KyuubiConf) extends SparkProcessBuilder("fake", config) { - override protected val commands: Array[String] = 
Array("ls") + override protected lazy val commands: Array[String] = Array("ls") } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala index 3bdc9cd3808..2f794ed4819 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala @@ -33,6 +33,7 @@ import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi._ import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.events.ServerEventHandlerRegister import org.apache.kyuubi.operation.HiveJDBCTestHelper import org.apache.kyuubi.operation.OperationState._ import org.apache.kyuubi.server.KyuubiServer @@ -134,13 +135,12 @@ class ServerJsonLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCT } } - val batchRequest = newSparkBatchRequest() + val batchRequest = newSparkBatchRequest(Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString)) val sessionMgr = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] val batchSessionHandle = sessionMgr.openBatchSession( Utils.currentUser, "kyuubi", "127.0.0.1", - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), batchRequest) withSessionConf()(Map.empty)(Map("spark.sql.shuffle.partitions" -> "2")) { withJdbcStatement() { statement => @@ -197,6 +197,7 @@ class ServerJsonLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCT server.initialize(conf) server.start() server.stop() + ServerEventHandlerRegister.registerEventLoggers(conf) // register event loggers again val hostName = InetAddress.getLocalHost.getCanonicalHostName val kyuubiServerInfoPath = diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala new file mode 100644 index 00000000000..461414f3f91 --- /dev/null +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kyuubi.events.handler + +import java.time.Duration +import java.util.Properties + +import scala.collection.JavaConverters._ +import scala.concurrent.duration._ +import scala.util.Random + +import com.dimafeng.testcontainers.KafkaContainer +import com.dimafeng.testcontainers.scalatest.TestContainerForAll +import com.fasterxml.jackson.databind.json.JsonMapper +import org.apache.kafka.clients.admin.{AdminClient, NewTopic} +import org.apache.kafka.clients.consumer.KafkaConsumer + +import org.apache.kyuubi._ +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.events.handler.ServerKafkaLoggingEventHandler.KAFKA_SERVER_EVENT_HANDLER_PREFIX +import org.apache.kyuubi.operation.HiveJDBCTestHelper + +abstract class ServerKafkaLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCTestHelper + with BatchTestHelper with TestContainerForAll { + + /** + * `confluentinc/cp-kafka` is Confluent Community Docker Image for Apache Kafka. + * The list of compatibility for Kafka's version refers to: + * https://docs.confluent.io/platform/current/installation + * /versions-interoperability.html#cp-and-apache-ak-compatibility + */ + protected val imageTag: String + override lazy val containerDef: KafkaContainer.Def = + KafkaContainer.Def(s"confluentinc/cp-kafka:$imageTag") + private val destTopic = "server-event-topic" + private val mapper = JsonMapper.builder().build() + override protected def jdbcUrl: String = getJdbcUrl + + override protected val conf: KyuubiConf = { + KyuubiConf() + .set(KyuubiConf.SERVER_EVENT_LOGGERS, Seq("KAFKA")) + .set(KyuubiConf.SERVER_EVENT_KAFKA_TOPIC, destTopic) + } + + override def beforeAll(): Unit = withContainers { kafkaContainer => + val bootstrapServers = kafkaContainer.bootstrapServers + createTopic(kafkaContainer.bootstrapServers, destTopic) + conf.set(s"$KAFKA_SERVER_EVENT_HANDLER_PREFIX.bootstrap.servers", bootstrapServers) + + super.beforeAll() + } + + private def createTopic(kafkaServerUrl: String, topic: String): Unit = { + val adminProps = new Properties + adminProps.setProperty("bootstrap.servers", kafkaServerUrl) + val adminClient = AdminClient.create(adminProps) + adminClient.createTopics(List(new NewTopic(topic, 1, 1.toShort)).asJava) + adminClient.close() + } + + test("check server events sent to kafka topic") { + withContainers { kafkaContainer => + val consumerConf = new Properties + Map( + "bootstrap.servers" -> kafkaContainer.bootstrapServers, + "group.id" -> s"server-kafka-logger-test-${Random.nextInt}", + "auto.offset.reset" -> "earliest", + "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer", + "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer") + .foreach(p => consumerConf.setProperty(p._1, p._2)) + val consumer = new KafkaConsumer[String, String](consumerConf) + try { + consumer.subscribe(List(destTopic).asJava) + eventually(timeout(10.seconds), interval(500.milliseconds)) { + val records = consumer.poll(Duration.ofMillis(500)) + assert(records.count() > 0) + records.forEach { record => + val jsonObj = mapper.readTree(record.value()) + assertResult("kyuubi_server_info")(record.key) + assertResult(server.getName)(jsonObj.get("serverName").asText()) + } + } + } finally { + consumer.close() + } + } + } +} + +class ServerKafkaLoggingEventHandlerSuiteForKafka2 extends ServerKafkaLoggingEventHandlerSuite { + // equivalent to Apache Kafka 2.8.x + override val imageTag = "6.2.10" +} + +class ServerKafkaLoggingEventHandlerSuiteForKafka3 extends 
ServerKafkaLoggingEventHandlerSuite { + // equivalent to Apache Kafka 3.3.x + override val imageTag = "7.3.3" +} diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationKerberosAndPlainAuthSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationKerberosAndPlainAuthSuite.scala index 31cde639734..1791b492e25 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationKerberosAndPlainAuthSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationKerberosAndPlainAuthSuite.scala @@ -64,7 +64,7 @@ class KyuubiOperationKerberosAndPlainAuthSuite extends WithKyuubiServer with Ker assert(UserGroupInformation.isSecurityEnabled) KyuubiConf() - .set(KyuubiConf.AUTHENTICATION_METHOD, Seq("KERBEROS", "LDAP", "CUSTOM")) + .set(KyuubiConf.AUTHENTICATION_METHOD, Set("KERBEROS", "LDAP", "CUSTOM")) .set(KyuubiConf.SERVER_KEYTAB, testKeytab) .set(KyuubiConf.SERVER_PRINCIPAL, testPrincipal) .set(KyuubiConf.AUTHENTICATION_LDAP_URL, ldapUrl) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala index 669475b6cba..97ab21998b9 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala @@ -29,12 +29,12 @@ import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.{KYUUBI_VERSION, WithKyuubiServer} import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys} import org.apache.kyuubi.config.KyuubiConf.SESSION_CONF_ADVISOR -import org.apache.kyuubi.engine.ApplicationState +import org.apache.kyuubi.engine.{ApplicationManagerInfo, ApplicationState} import org.apache.kyuubi.jdbc.KyuubiHiveDriver -import org.apache.kyuubi.jdbc.hive.KyuubiConnection +import org.apache.kyuubi.jdbc.hive.{KyuubiConnection, KyuubiSQLException} import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} import org.apache.kyuubi.plugin.SessionConfAdvisor -import org.apache.kyuubi.session.{KyuubiSessionManager, SessionType} +import org.apache.kyuubi.session.{KyuubiSessionImpl, KyuubiSessionManager, SessionHandle, SessionType} /** * UT with Connection level engine shared cost much time, only run basic jdbc tests. 
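Stepping back to the `ServerKafkaLoggingEventHandlerSuite` added above: enabling the Kafka event logger comes down to two settings plus producer properties forwarded under the handler prefix. Below is a compact sketch, restricted to the config constants the new suite exercises; the `KafkaEventLoggingConfSketch` object and its `build` helper are illustrative, not part of the patch.

```scala
// Sketch only; reuses the exact config constants exercised by the new suite.
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.events.handler.ServerKafkaLoggingEventHandler.KAFKA_SERVER_EVENT_HANDLER_PREFIX

object KafkaEventLoggingConfSketch {
  def build(bootstrapServers: String, topic: String): KyuubiConf = {
    KyuubiConf()
      .set(KyuubiConf.SERVER_EVENT_LOGGERS, Seq("KAFKA")) // enable the KAFKA server event logger
      .set(KyuubiConf.SERVER_EVENT_KAFKA_TOPIC, topic) // topic receiving server events, e.g. kyuubi_server_info
      // any producer property can be forwarded under the handler prefix
      .set(s"$KAFKA_SERVER_EVENT_HANDLER_PREFIX.bootstrap.servers", bootstrapServers)
  }
}
```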
@@ -48,6 +48,7 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe override protected val conf: KyuubiConf = { KyuubiConf().set(KyuubiConf.ENGINE_SHARE_LEVEL, "connection") .set(SESSION_CONF_ADVISOR.key, classOf[TestSessionConfAdvisor].getName) + .set(KyuubiConf.ENGINE_SPARK_MAX_INITIAL_WAIT.key, "0") } test("KYUUBI #647 - async query causes engine crash") { @@ -77,8 +78,9 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe val executeStmtResp = client.ExecuteStatement(executeStmtReq) assert(executeStmtResp.getStatus.getStatusCode === TStatusCode.ERROR_STATUS) assert(executeStmtResp.getOperationHandle === null) - assert(executeStmtResp.getStatus.getErrorMessage contains - "Caused by: java.net.SocketException: Connection reset") + val errMsg = executeStmtResp.getStatus.getErrorMessage + assert(errMsg.contains("Caused by: java.net.SocketException: Connection reset") || + errMsg.contains(s"Socket for ${SessionHandle(handle)} is closed")) } } @@ -137,6 +139,7 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe assert(connection.getEngineId.startsWith("local-")) assert(connection.getEngineName.startsWith("kyuubi")) assert(connection.getEngineUrl.nonEmpty) + assert(connection.getEngineRefId.nonEmpty) val stmt = connection.createStatement() try { stmt.execute("select engine_name()") @@ -231,9 +234,11 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe } val engineId = sessionManager.allSessions().head.handle.identifier.toString // kill the engine application and wait the engine terminate - sessionManager.applicationManager.killApplication(None, engineId) + sessionManager.applicationManager.killApplication(ApplicationManagerInfo(None), engineId) eventually(timeout(30.seconds), interval(100.milliseconds)) { - assert(sessionManager.applicationManager.getApplicationInfo(None, engineId) + assert(sessionManager.applicationManager.getApplicationInfo( + ApplicationManagerInfo(None), + engineId) .exists(_.state == ApplicationState.NOT_FOUND)) } assert(!conn.isValid(3000)) @@ -280,6 +285,64 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe assert(rs.getString(2) === KYUUBI_VERSION) } } + + test("JDBC client should catch task failed exception in the incremental mode") { + withJdbcStatement() { statement => + statement.executeQuery(s"set ${KyuubiConf.OPERATION_INCREMENTAL_COLLECT.key}=true;") + val resultSet = statement.executeQuery( + "SELECT raise_error('client should catch this exception');") + val e = intercept[KyuubiSQLException](resultSet.next()) + assert(e.getMessage.contains("client should catch this exception")) + } + } + + test("support to interrupt the thrift request if remote engine is broken") { + withSessionConf(Map( + KyuubiConf.ENGINE_ALIVE_PROBE_ENABLED.key -> "true", + KyuubiConf.ENGINE_ALIVE_PROBE_INTERVAL.key -> "1000", + KyuubiConf.ENGINE_ALIVE_TIMEOUT.key -> "1000"))(Map.empty)( + Map.empty) { + withSessionHandle { (client, handle) => + val preReq = new TExecuteStatementReq() + preReq.setStatement("select engine_name()") + preReq.setSessionHandle(handle) + preReq.setRunAsync(false) + client.ExecuteStatement(preReq) + + val sessionHandle = SessionHandle(handle) + val session = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] + .getSession(sessionHandle).asInstanceOf[KyuubiSessionImpl] + + val exitReq = new TExecuteStatementReq() + exitReq.setStatement("SELECT java_method('java.lang.Thread', 'sleep', 
1000L)," + + "java_method('java.lang.System', 'exit', 1)") + exitReq.setSessionHandle(handle) + exitReq.setRunAsync(true) + client.ExecuteStatement(exitReq) + + session.sessionManager.getConf + .set(KyuubiConf.OPERATION_STATUS_UPDATE_INTERVAL, 3000L) + + val executeStmtReq = new TExecuteStatementReq() + executeStmtReq.setStatement("SELECT java_method('java.lang.Thread', 'sleep', 30000l)") + executeStmtReq.setSessionHandle(handle) + executeStmtReq.setRunAsync(false) + val startTime = System.currentTimeMillis() + val executeStmtResp = client.ExecuteStatement(executeStmtReq) + assert(executeStmtResp.getStatus.getStatusCode === TStatusCode.ERROR_STATUS) + val errorMsg = executeStmtResp.getStatus.getErrorMessage + assert(errorMsg.contains("java.net.SocketException") || + errorMsg.contains("org.apache.thrift.transport.TTransportException") || + errorMsg.contains("connection does not exist") || + errorMsg.contains(s"Socket for ${SessionHandle(handle)} is closed")) + val elapsedTime = System.currentTimeMillis() - startTime + assert(elapsedTime < 20 * 1000) + eventually(timeout(3.seconds)) { + assert(session.client.asyncRequestInterrupted) + } + } + } + } } class TestSessionConfAdvisor extends SessionConfAdvisor { diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerUserSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerUserSuite.scala index 40bb165b8b1..a67534164bd 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerUserSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerUserSuite.scala @@ -17,7 +17,7 @@ package org.apache.kyuubi.operation -import java.util.UUID +import java.util.{Properties, UUID} import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} import org.apache.hive.service.rpc.thrift.{TExecuteStatementReq, TGetInfoReq, TGetInfoType, TStatusCode} @@ -26,10 +26,11 @@ import org.scalatest.time.SpanSugar._ import org.apache.kyuubi.{KYUUBI_VERSION, Utils, WithKyuubiServer, WithSimpleDFSService} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf.KYUUBI_ENGINE_ENV_PREFIX -import org.apache.kyuubi.engine.SemanticVersion -import org.apache.kyuubi.jdbc.hive.KyuubiStatement +import org.apache.kyuubi.jdbc.KyuubiHiveDriver +import org.apache.kyuubi.jdbc.hive.{KyuubiConnection, KyuubiStatement} import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} -import org.apache.kyuubi.session.{KyuubiSessionImpl, KyuubiSessionManager, SessionHandle} +import org.apache.kyuubi.session.{KyuubiSessionImpl, SessionHandle} +import org.apache.kyuubi.util.SemanticVersion import org.apache.kyuubi.zookeeper.ZookeeperConf class KyuubiOperationPerUserSuite @@ -68,6 +69,19 @@ class KyuubiOperationPerUserSuite } } + test("kyuubi defined function - engine_url") { + withSessionConf(Map.empty)(Map.empty)(Map( + "spark.ui.enabled" -> "true")) { + val driver = new KyuubiHiveDriver() + val connection = driver.connect(jdbcUrlWithConf, new Properties()) + .asInstanceOf[KyuubiConnection] + val stmt = connection.createStatement() + val rs = stmt.executeQuery("SELECT engine_url()") + assert(rs.next()) + assert(rs.getString(1).nonEmpty) + } + } + test("ensure two connections in user mode share the same engine") { var r1: String = null var r2: String = null @@ -166,50 +180,6 @@ class KyuubiOperationPerUserSuite assert(r1 !== r2) } - test("support to interrupt the thrift request if remote engine is broken") { - assume(!httpMode) - 
withSessionConf(Map( - KyuubiConf.ENGINE_ALIVE_PROBE_ENABLED.key -> "true", - KyuubiConf.ENGINE_ALIVE_PROBE_INTERVAL.key -> "1000", - KyuubiConf.ENGINE_ALIVE_TIMEOUT.key -> "1000"))(Map.empty)( - Map.empty) { - withSessionHandle { (client, handle) => - val preReq = new TExecuteStatementReq() - preReq.setStatement("select engine_name()") - preReq.setSessionHandle(handle) - preReq.setRunAsync(false) - client.ExecuteStatement(preReq) - - val sessionHandle = SessionHandle(handle) - val session = server.backendService.sessionManager.asInstanceOf[KyuubiSessionManager] - .getSession(sessionHandle).asInstanceOf[KyuubiSessionImpl] - session.client.getEngineAliveProbeProtocol.foreach(_.getTransport.close()) - - val exitReq = new TExecuteStatementReq() - exitReq.setStatement("SELECT java_method('java.lang.Thread', 'sleep', 1000L)," + - "java_method('java.lang.System', 'exit', 1)") - exitReq.setSessionHandle(handle) - exitReq.setRunAsync(true) - client.ExecuteStatement(exitReq) - - val executeStmtReq = new TExecuteStatementReq() - executeStmtReq.setStatement("SELECT java_method('java.lang.Thread', 'sleep', 30000l)") - executeStmtReq.setSessionHandle(handle) - executeStmtReq.setRunAsync(false) - val startTime = System.currentTimeMillis() - val executeStmtResp = client.ExecuteStatement(executeStmtReq) - assert(executeStmtResp.getStatus.getStatusCode === TStatusCode.ERROR_STATUS) - assert(executeStmtResp.getStatus.getErrorMessage.contains( - "java.net.SocketException: Connection reset") || - executeStmtResp.getStatus.getErrorMessage.contains( - "Caused by: java.net.SocketException: Broken pipe (Write failed)")) - val elapsedTime = System.currentTimeMillis() - startTime - assert(elapsedTime < 20 * 1000) - assert(session.client.asyncRequestInterrupted) - } - } - } - test("max result rows") { Seq("true", "false").foreach { incremental => Seq("thrift", "arrow").foreach { resultFormat => @@ -374,13 +344,49 @@ class KyuubiOperationPerUserSuite eventually(timeout(10.seconds)) { assert(session.handle === SessionHandle.apply(session.client.remoteSessionHandle)) } - val opHandle = session.executeStatement("SELECT engine_id()", Map.empty, true, 0L) - eventually(timeout(10.seconds)) { - val operation = session.sessionManager.operationManager.getOperation( - opHandle).asInstanceOf[KyuubiOperation] - assert(opHandle == OperationHandle.apply(operation.remoteOpHandle())) + + def checkOpHandleAlign(statement: String, confOverlay: Map[String, String]): Unit = { + val opHandle = session.executeStatement(statement, confOverlay, true, 0L) + eventually(timeout(10.seconds)) { + val operation = session.sessionManager.operationManager.getOperation( + opHandle).asInstanceOf[KyuubiOperation] + assert(opHandle == OperationHandle.apply(operation.remoteOpHandle())) + } + } + + val statement = "SELECT engine_id()" + + val confOverlay = Map(KyuubiConf.OPERATION_PLAN_ONLY_MODE.key -> "PARSE") + checkOpHandleAlign(statement, confOverlay) + + Map( + statement -> "SQL", + s"""spark.sql("$statement")""" -> "SCALA", + s"spark.sql('$statement')" -> "PYTHON").foreach { case (statement, lang) => + val confOverlay = Map(KyuubiConf.OPERATION_LANGUAGE.key -> lang) + checkOpHandleAlign(statement, confOverlay) } } } } + + test("support to expose kyuubi operation metrics") { + withSessionConf()(Map.empty)(Map.empty) { + withJdbcStatement() { statement => + val uuid = UUID.randomUUID().toString + val query = s"select '$uuid'" + val res = statement.executeQuery(query) + assert(res.next()) + assert(!res.next()) + + val operationMetrics = + 
server.backendService.sessionManager.operationManager.allOperations() + .map(_.asInstanceOf[KyuubiOperation]) + .filter(_.statement == query) + .head.metrics + assert(operationMetrics.get("fetchResultsCount") == Some("1")) + assert(operationMetrics.get("fetchLogCount") == Some("0")) + } + } + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecurity.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecuritySuite.scala similarity index 80% rename from kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecurity.scala rename to kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecuritySuite.scala index 63369f4b21a..da6367bc453 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecurity.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationWithEngineSecuritySuite.scala @@ -17,25 +17,26 @@ package org.apache.kyuubi.operation +import java.nio.charset.StandardCharsets + import org.apache.kyuubi.WithKyuubiServer import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ha.HighAvailabilityConf import org.apache.kyuubi.ha.client.DiscoveryClientProvider -import org.apache.kyuubi.service.authentication.{InternalSecurityAccessor, ZooKeeperEngineSecuritySecretProviderImpl} +import org.apache.kyuubi.service.authentication.InternalSecurityAccessor -class KyuubiOperationWithEngineSecurity extends WithKyuubiServer with HiveJDBCTestHelper { +class KyuubiOperationWithEngineSecuritySuite extends WithKyuubiServer with HiveJDBCTestHelper { import DiscoveryClientProvider._ override protected def jdbcUrl: String = getJdbcUrl private val engineSecretNode = "/SECRET" + private val engineSecret = "_ENGINE_SECRET_" override protected val conf: KyuubiConf = { KyuubiConf() .set(KyuubiConf.ENGINE_SECURITY_ENABLED, false) - .set( - KyuubiConf.ENGINE_SECURITY_SECRET_PROVIDER, - classOf[ZooKeeperEngineSecuritySecretProviderImpl].getCanonicalName) + .set(KyuubiConf.ENGINE_SECURITY_SECRET_PROVIDER, "zookeeper") .set(HighAvailabilityConf.HA_ZK_ENGINE_SECURE_SECRET_NODE, engineSecretNode) } @@ -43,7 +44,9 @@ class KyuubiOperationWithEngineSecurity extends WithKyuubiServer with HiveJDBCTe super.beforeAll() withDiscoveryClient(conf) { discoveryClient => discoveryClient.create(engineSecretNode, "PERSISTENT", false) - discoveryClient.startSecretNode("PERSISTENT", engineSecretNode, "_ENGINE_SECRET_") + discoveryClient.startSecretNode("PERSISTENT", engineSecretNode, engineSecret) + val expected = engineSecret.getBytes(StandardCharsets.UTF_8) + assert(discoveryClient.getData(engineSecretNode) === expected) } conf.set(KyuubiConf.ENGINE_SECURITY_ENABLED, true) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiRestAuthenticationSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiRestAuthenticationSuite.scala index 64707ce012e..089b756f54f 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiRestAuthenticationSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiRestAuthenticationSuite.scala @@ -25,7 +25,6 @@ import javax.ws.rs.core.MediaType import scala.collection.JavaConverters._ import org.apache.hadoop.security.UserGroupInformation -import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.kyuubi.RestClientTestHelper import org.apache.kyuubi.client.api.v1.dto.{SessionHandle, 
SessionOpenCount, SessionOpenRequest} @@ -38,12 +37,20 @@ import org.apache.kyuubi.session.KyuubiSession class KyuubiRestAuthenticationSuite extends RestClientTestHelper { override protected val otherConfigs: Map[String, String] = { - // allow to impersonate other users with spnego authentication Map( + KyuubiConf.ENGINE_SECURITY_ENABLED.key -> "true", + KyuubiConf.ENGINE_SECURITY_SECRET_PROVIDER.key -> "simple", + KyuubiConf.SIMPLE_SECURITY_SECRET_PROVIDER_PROVIDER_SECRET.key -> "_KYUUBI_REST_", + // allow to impersonate other users with spnego authentication s"hadoop.proxyuser.$clientPrincipalUser.groups" -> "*", s"hadoop.proxyuser.$clientPrincipalUser.hosts" -> "*") } + override def beforeAll(): Unit = { + super.beforeAll() + InternalSecurityAccessor.initialize(conf, true) + } + test("test with LDAP authorization") { val encodeAuthorization = new String( Base64.getEncoder.encode( @@ -129,11 +136,9 @@ class KyuubiRestAuthenticationSuite extends RestClientTestHelper { val proxyUser = "user1" UserGroupInformation.loginUserFromKeytab(testPrincipal, testKeytab) var token = generateToken(hostName) - val sessionOpenRequest = new SessionOpenRequest( - TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11.getValue, - Map( - KyuubiConf.ENGINE_SHARE_LEVEL.key -> "CONNECTION", - "hive.server2.proxy.user" -> proxyUser).asJava) + val sessionOpenRequest = new SessionOpenRequest(Map( + KyuubiConf.ENGINE_SHARE_LEVEL.key -> "CONNECTION", + "hive.server2.proxy.user" -> proxyUser).asJava) var response = webTarget.path("api/v1/sessions") .request() diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/PlanOnlyOperationSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/PlanOnlyOperationSuite.scala index 6a37e823db5..8773440a686 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/PlanOnlyOperationSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/PlanOnlyOperationSuite.scala @@ -201,6 +201,34 @@ class PlanOnlyOperationSuite extends WithKyuubiServer with HiveJDBCTestHelper { } } + test("kyuubi #3444: Plan only mode with lineage mode") { + + val ddl = "create table if not exists t0(a int) using parquet" + val dql = "select * from t0" + withSessionConf()(Map(KyuubiConf.OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name))() { + withJdbcStatement("t0") { statement => + statement.execute(ddl) + statement.execute("SET kyuubi.operation.plan.only.mode=lineage") + val lineageParserClassName = "org.apache.kyuubi.plugin.lineage.LineageParserProvider" + try { + val resultSet = statement.executeQuery(dql) + assert(resultSet.next()) + val actualResult = + """ + |{"inputTables":["spark_catalog.default.t0"],"outputTables":[], + |"columnLineage":[{"column":"a","originalColumns":["spark_catalog.default.t0.a"]}]} + |""".stripMargin.split("\n").mkString("") + assert(resultSet.getString(1) == actualResult) + } catch { + case e: Throwable => + assert(e.getMessage.contains(s"'$lineageParserClassName' not found")) + } finally { + statement.execute("SET kyuubi.operation.plan.only.mode=none") + } + } + } + } + private def getOperationPlanWithStatement(statement: Statement): String = { val resultSet = statement.executeQuery("select 1 where true") assert(resultSet.next()) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpKerberosAndPlainAuthSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpKerberosAndPlainAuthSuite.scala index 941e121a6cd..cee43bf5cf1 100644 
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpKerberosAndPlainAuthSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpKerberosAndPlainAuthSuite.scala @@ -49,7 +49,7 @@ class KyuubiOperationThriftHttpKerberosAndPlainAuthSuite UserGroupInformation.setConfiguration(config) assert(UserGroupInformation.isSecurityEnabled) - KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Seq("KERBEROS", "LDAP", "CUSTOM")) + KyuubiConf().set(KyuubiConf.AUTHENTICATION_METHOD, Set("KERBEROS", "LDAP", "CUSTOM")) .set(KyuubiConf.SERVER_KEYTAB, testKeytab) .set(KyuubiConf.SERVER_PRINCIPAL, testPrincipal) .set(KyuubiConf.AUTHENTICATION_LDAP_URL, ldapUrl) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpPerUserSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpPerUserSuite.scala index d30dd94a302..b475e75de26 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpPerUserSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/thrift/http/KyuubiOperationThriftHttpPerUserSuite.scala @@ -34,5 +34,5 @@ class KyuubiOperationThriftHttpPerUserSuite extends KyuubiOperationPerUserSuite s"jdbc:hive2://${server.frontendServices.head.connectionUrl}/;transportMode=http;" + s"httpPath=cliservice;" - override protected lazy val httpMode = true; + override protected lazy val httpMode = true } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/tpcds/OutputSchemaTPCDSSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/tpcds/OutputSchemaTPCDSSuite.scala index 80ed6bb1721..9505c4a3be7 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/tpcds/OutputSchemaTPCDSSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/tpcds/OutputSchemaTPCDSSuite.scala @@ -26,24 +26,20 @@ import org.apache.kyuubi.{DeltaSuiteMixin, WithKyuubiServer} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.server.mysql.MySQLJDBCTestHelper import org.apache.kyuubi.tags.DeltaTest +import org.apache.kyuubi.util.AssertionUtils._ +import org.apache.kyuubi.util.GoldenFileUtils._ -// scalastyle:off line.size.limit /** * To run this test suite: * {{{ - * build/mvn clean install \ - * -Dmaven.plugin.scalatest.exclude.tags="" \ - * -Dtest=none -DwildcardSuites=org.apache.kyuubi.operation.tpcds.OutputSchemaTPCDSSuite + * KYUUBI_UPDATE=0 dev/gen/gen_tpcds_output_schema.sh * }}} * * To re-generate golden files for this suite: * {{{ - * KYUUBI_UPDATE=1 build/mvn clean install \ - * -Dmaven.plugin.scalatest.exclude.tags="" \ - * -Dtest=none -DwildcardSuites=org.apache.kyuubi.operation.tpcds.OutputSchemaTPCDSSuite + * dev/gen/gen_tpcds_output_schema.sh * }}} */ -// scalastyle:on line.size.limit @Slow @DeltaTest class OutputSchemaTPCDSSuite extends WithKyuubiServer @@ -78,7 +74,6 @@ class OutputSchemaTPCDSSuite extends WithKyuubiServer super.afterAll() } - private val regenerateGoldenFiles = sys.env.get("KYUUBI_UPDATE").contains("1") protected val baseResourcePath: Path = Paths.get("src", "test", "resources") private def fileToString(file: Path): String = { @@ -93,12 +88,15 @@ class OutputSchemaTPCDSSuite extends WithKyuubiServer val columnTypes = (1 to result.getMetaData.getColumnCount).map { i => s"${result.getMetaData.getColumnName(i)}:${result.getMetaData.getColumnTypeName(i)}" 
}.mkString("struct<", ",", ">\n") - if (regenerateGoldenFiles) { + if (isRegenerateGoldenFiles) { Files.write(goldenFile, columnTypes.getBytes()) + } else { + assertFileContent( + goldenFile, + Seq(columnTypes), + "dev/gen/gen_tpcds_output_schema.sh", + splitFirstExpectedLine = true) } - - val expected = fileToString(goldenFile) - assert(columnTypes === expected) } finally { result.close() } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/parser/trino/KyuubiTrinoFeParserSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/parser/trino/KyuubiTrinoFeParserSuite.scala index bbced0b61ad..205a6a7be90 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/parser/trino/KyuubiTrinoFeParserSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/parser/trino/KyuubiTrinoFeParserSuite.scala @@ -20,7 +20,7 @@ package org.apache.kyuubi.parser.trino import org.apache.kyuubi.KyuubiFunSuite import org.apache.kyuubi.sql.parser.trino.KyuubiTrinoFeParser import org.apache.kyuubi.sql.plan.{KyuubiTreeNode, PassThroughNode} -import org.apache.kyuubi.sql.plan.trino.{GetCatalogs, GetColumns, GetPrimaryKeys, GetSchemas, GetTables, GetTableTypes, GetTypeInfo} +import org.apache.kyuubi.sql.plan.trino.{Deallocate, ExecuteForPreparing, GetCatalogs, GetColumns, GetPrimaryKeys, GetSchemas, GetTables, GetTableTypes, GetTypeInfo} class KyuubiTrinoFeParserSuite extends KyuubiFunSuite { val parser = new KyuubiTrinoFeParser() @@ -369,4 +369,22 @@ class KyuubiTrinoFeParserSuite extends KyuubiFunSuite { assert(kyuubiTreeNode.isInstanceOf[GetPrimaryKeys]) } + + test("Support PreparedStatement for Trino Fe (ExecuteForPreparing)") { + val kyuubiTreeNode = parse( + """ + | EXECUTE statement1 USING INTEGER '1' + |""".stripMargin) + + assert(kyuubiTreeNode.isInstanceOf[ExecuteForPreparing]) + } + + test("Support PreparedStatement for Trino Fe (Deallocate)") { + val kyuubiTreeNode = parse( + """ + | DEALLOCATE PREPARE statement1 + |""".stripMargin) + + assert(kyuubiTreeNode.isInstanceOf[Deallocate]) + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/BackendServiceMetricSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/BackendServiceMetricSuite.scala index 53a53ef1dbe..5a086c86027 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/BackendServiceMetricSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/BackendServiceMetricSuite.scala @@ -21,10 +21,12 @@ import java.nio.file.{Path, Paths} import java.time.Duration import com.fasterxml.jackson.databind.ObjectMapper +import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.{Utils, WithKyuubiServer} import org.apache.kyuubi.config.KyuubiConf -import org.apache.kyuubi.metrics.{MetricsConf, MetricsConstants} +import org.apache.kyuubi.metrics.{MetricsConf, ReporterType} +import org.apache.kyuubi.metrics.MetricsConstants._ import org.apache.kyuubi.operation.HiveJDBCTestHelper class BackendServiceMetricSuite extends WithKyuubiServer with HiveJDBCTestHelper { @@ -34,7 +36,7 @@ class BackendServiceMetricSuite extends WithKyuubiServer with HiveJDBCTestHelper val reportPath: Path = Utils.createTempDir() override protected val conf: KyuubiConf = { KyuubiConf() - .set(MetricsConf.METRICS_REPORTERS, Seq("JSON")) + .set(MetricsConf.METRICS_REPORTERS, Set(ReporterType.JSON.toString)) .set(MetricsConf.METRICS_JSON_LOCATION, reportPath.toString) .set(MetricsConf.METRICS_JSON_INTERVAL, Duration.ofMillis(100).toMillis) } @@ -45,43 +47,40 @@ class BackendServiceMetricSuite 
extends WithKyuubiServer with HiveJDBCTestHelper withJdbcStatement() { statement => statement.executeQuery("CREATE TABLE stu_test(id int, name string) USING parquet") statement.execute("insert into stu_test values(1, 'a'), (2, 'b'), (3, 'c')") - Thread.sleep(Duration.ofMillis(111).toMillis) + val logRows1 = eventually(timeout(10.seconds), interval(1.second)) { + val res = objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile) + assert(res.has("timers")) + val timer = res.get("timers") + assert(timer.get(BS_EXECUTE_STATEMENT).get("count").asInt() == 2) + assert(timer.get(BS_EXECUTE_STATEMENT).get("mean").asDouble() > 0) - val res1 = objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile) - assert(res1.has("timers")) - val timer1 = res1.get("timers") - assert( - timer1.get(MetricsConstants.BS_EXECUTE_STATEMENT).get("count").asInt() == 2) - assert( - timer1.get(MetricsConstants.BS_EXECUTE_STATEMENT).get("mean").asDouble() > 0) - - assert(res1.has("meters")) - val meters1 = res1.get("meters") - val logRows1 = meters1.get(MetricsConstants.BS_FETCH_LOG_ROWS_RATE).get("count").asInt() - assert(logRows1 > 0) + assert(res.has("meters")) + val meters = res.get("meters") + val logRows = meters.get(BS_FETCH_LOG_ROWS_RATE).get("count").asInt() + assert(logRows > 0) + logRows + } statement.execute("select * from stu_test limit 2") statement.getResultSet.next() - Thread.sleep(Duration.ofMillis(111).toMillis) - - val res2 = objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile) - val timer2 = res2.get("timers") - assert( - timer2.get(MetricsConstants.BS_OPEN_SESSION).get("count").asInt() == 1) - assert( - timer2.get(MetricsConstants.BS_OPEN_SESSION).get("min").asInt() > 0) - val execStatementNode2 = timer2.get(MetricsConstants.BS_EXECUTE_STATEMENT) - assert(execStatementNode2.get("count").asInt() == 3) - assert( - execStatementNode2.get("max").asDouble() >= execStatementNode2.get("mean").asDouble() && - execStatementNode2.get("mean").asDouble() >= execStatementNode2.get("min").asDouble()) + eventually(timeout(60.seconds), interval(1.second)) { + val res = objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile) + val timer = res.get("timers") + assert(timer.get(BS_OPEN_SESSION).get("count").asInt() == 1) + assert(timer.get(BS_OPEN_SESSION).get("min").asDouble() > 0) + val execStatementNode = timer.get(BS_EXECUTE_STATEMENT) + assert(execStatementNode.get("count").asInt() == 3) + assert( + execStatementNode.get("max").asDouble() >= execStatementNode.get("mean").asDouble() && + execStatementNode.get("mean").asDouble() >= execStatementNode.get("min").asDouble()) - val meters2 = - objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile).get("meters") - assert(meters2.get(MetricsConstants.BS_FETCH_RESULT_ROWS_RATE).get("count").asInt() == 7) - assert(meters2.get(MetricsConstants.BS_FETCH_LOG_ROWS_RATE).get("count").asInt() >= logRows1) + val meters = + objMapper.readTree(Paths.get(reportPath.toString, "report.json").toFile).get("meters") + assert(meters.get(BS_FETCH_RESULT_ROWS_RATE).get("count").asInt() == 8) + assert(meters.get(BS_FETCH_LOG_ROWS_RATE).get("count").asInt() >= logRows1) - statement.executeQuery("DROP TABLE stu_test") + statement.executeQuery("DROP TABLE stu_test") + } } } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendServiceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendServiceSuite.scala index 735863b8b20..4bf8b8eda55 100644 --- 
a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendServiceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiMySQLFrontendServiceSuite.scala @@ -19,6 +19,7 @@ package org.apache.kyuubi.server import org.apache.kyuubi.KyuubiFunSuite import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.config.KyuubiConf.{FRONTEND_ADVERTISED_HOST, FRONTEND_MYSQL_BIND_HOST, FRONTEND_MYSQL_BIND_PORT} import org.apache.kyuubi.service.NoopMySQLFrontendServer import org.apache.kyuubi.service.ServiceState._ @@ -53,4 +54,23 @@ class KyuubiMySQLFrontendServiceSuite extends KyuubiFunSuite { assert(frontendService.getServiceState == STOPPED) server.stop() } + + test("advertised host") { + val server = new NoopMySQLFrontendServer + val conf = KyuubiConf() + .set(FRONTEND_MYSQL_BIND_HOST.key, "localhost") + .set(FRONTEND_MYSQL_BIND_PORT, 0) + .set(FRONTEND_ADVERTISED_HOST, "dummy.host") + + server.initialize(conf) + assert(server.frontendServices.head.connectionUrl.startsWith("dummy.host")) + + val server2 = new NoopMySQLFrontendServer + val conf2 = KyuubiConf() + .set(FRONTEND_MYSQL_BIND_HOST.key, "localhost") + .set(FRONTEND_MYSQL_BIND_PORT, 0) + + server2.initialize(conf2) + assert(server2.frontendServices.head.connectionUrl.startsWith("localhost")) + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendServiceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendServiceSuite.scala index 69c10e7302f..5c54cbbb4b7 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendServiceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/KyuubiTBinaryFrontendServiceSuite.scala @@ -17,7 +17,9 @@ package org.apache.kyuubi.server -import org.apache.hive.service.rpc.thrift.TOpenSessionReq +import scala.collection.JavaConverters._ + +import org.apache.hive.service.rpc.thrift.{TOpenSessionReq, TSessionHandle} import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.{KyuubiFunSuite, Utils, WithKyuubiServer} @@ -79,4 +81,39 @@ class KyuubiTBinaryFrontendServiceSuite extends WithKyuubiServer with KyuubiFunS MetricsConstants.THRIFT_BINARY_CONN_OPEN).getOrElse(0L) - openConnections === 0) } } + + test("do not close session when disconnect") { + val sessionCount = server.backendService.sessionManager.allSessions().size + var handle: TSessionHandle = null + TClientTestUtils.withThriftClient(server.frontendServices.head) { + client => + val req = new TOpenSessionReq() + req.setUsername(Utils.currentUser) + req.setPassword("anonymous") + req.setConfiguration(Map("kyuubi.session.close.on.disconnect" -> "false").asJava) + val resp = client.OpenSession(req) + handle = resp.getSessionHandle + + assert(server.backendService.sessionManager.allSessions().size - sessionCount == 1) + } + Thread.sleep(3000L) + assert(server.backendService.sessionManager.allSessions().size - sessionCount == 1) + } + + test("close session when disconnect - default behavior") { + val sessionCount = server.backendService.sessionManager.allSessions().size + var handle: TSessionHandle = null + TClientTestUtils.withThriftClient(server.frontendServices.head) { + client => + val req = new TOpenSessionReq() + req.setUsername(Utils.currentUser) + req.setPassword("anonymous") + val resp = client.OpenSession(req) + handle = resp.getSessionHandle + + assert(server.backendService.sessionManager.allSessions().size - sessionCount == 1) + } + Thread.sleep(3000L) + 
assert(server.backendService.sessionManager.allSessions().size == sessionCount) + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala index 1a73cc24ca0..68a175efc4e 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala @@ -34,7 +34,7 @@ import org.apache.kyuubi.service.AbstractService class MiniYarnService extends AbstractService("TestMiniYarnService") { private val hadoopConfDir: File = Utils.createTempDir().toFile - private val yarnConf: YarnConfiguration = { + private var yarnConf: YarnConfiguration = { val yarnConfig = new YarnConfiguration() // Disable the disk utilization check to avoid the test hanging when people's disks are // getting full. @@ -71,6 +71,10 @@ class MiniYarnService extends AbstractService("TestMiniYarnService") { } private val yarnCluster: MiniYARNCluster = new MiniYARNCluster(getName, 1, 1, 1) + def setYarnConf(yarnConf: YarnConfiguration): Unit = { + this.yarnConf = yarnConf + } + override def initialize(conf: KyuubiConf): Unit = { yarnCluster.init(yarnConf) super.initialize(conf) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala index 8aaf6c5122c..6ca00c802c9 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala @@ -17,6 +17,7 @@ package org.apache.kyuubi.server.api.v1 +import java.nio.charset.StandardCharsets import java.util.{Base64, UUID} import javax.ws.rs.client.Entity import javax.ws.rs.core.{GenericType, MediaType} @@ -24,19 +25,22 @@ import javax.ws.rs.core.{GenericType, MediaType} import scala.collection.JavaConverters._ import org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2 +import org.mockito.Mockito.lenient import org.scalatest.time.SpanSugar.convertIntToGrainOfTime +import org.scalatestplus.mockito.MockitoSugar.mock import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiFunSuite, RestFrontendTestHelper, Utils} -import org.apache.kyuubi.client.api.v1.dto.{Engine, SessionData, SessionHandle, SessionOpenRequest} +import org.apache.kyuubi.client.api.v1.dto.{Engine, OperationData, ServerData, SessionData, SessionHandle, SessionOpenRequest} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_CONNECTION_URL_KEY -import org.apache.kyuubi.engine.{ApplicationState, EngineRef, KyuubiApplicationManager} +import org.apache.kyuubi.engine.{ApplicationManagerInfo, ApplicationState, EngineRef, KyuubiApplicationManager} import org.apache.kyuubi.engine.EngineType.SPARK_SQL -import org.apache.kyuubi.engine.ShareLevel.{CONNECTION, USER} -import org.apache.kyuubi.events.KyuubiOperationEvent +import org.apache.kyuubi.engine.ShareLevel.{CONNECTION, GROUP, USER} import org.apache.kyuubi.ha.HighAvailabilityConf +import org.apache.kyuubi.ha.client.{DiscoveryPaths, ServiceDiscovery} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient -import org.apache.kyuubi.ha.client.DiscoveryPaths +import org.apache.kyuubi.plugin.PluginLoader +import org.apache.kyuubi.server.KyuubiRestFrontendService import 
org.apache.kyuubi.server.http.authentication.AuthenticationHandler.AUTHORIZATION_HEADER class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { @@ -44,7 +48,14 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { private val engineMgr = new KyuubiApplicationManager() override protected lazy val conf: KyuubiConf = KyuubiConf() - .set(KyuubiConf.SERVER_ADMINISTRATORS, Seq("admin001")) + .set(KyuubiConf.SERVER_ADMINISTRATORS, Set("admin001")) + + private val encodeAuthorization: String = { + new String( + Base64.getEncoder.encode( + s"${Utils.currentUser}:".getBytes()), + StandardCharsets.UTF_8) + } override def beforeAll(): Unit = { super.beforeAll() @@ -63,11 +74,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { .post(null) assert(405 == response.getStatus) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") response = webTarget.path("api/v1/admin/refresh/hadoop_conf") .request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") @@ -76,7 +82,7 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { val admin001AuthHeader = new String( Base64.getEncoder.encode("admin001".getBytes()), - "UTF-8") + StandardCharsets.UTF_8) response = webTarget.path("api/v1/admin/refresh/hadoop_conf") .request() .header(AUTHORIZATION_HEADER, s"BASIC $admin001AuthHeader") @@ -85,7 +91,7 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { val admin002AuthHeader = new String( Base64.getEncoder.encode("admin002".getBytes()), - "UTF-8") + StandardCharsets.UTF_8) response = webTarget.path("api/v1/admin/refresh/hadoop_conf") .request() .header(AUTHORIZATION_HEADER, s"BASIC $admin002AuthHeader") @@ -99,11 +105,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { .post(null) assert(405 == response.getStatus) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") response = webTarget.path("api/v1/admin/refresh/user_defaults_conf") .request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") @@ -117,11 +118,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { .post(null) assert(405 == response.getStatus) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") response = webTarget.path("api/v1/admin/refresh/unlimited_users") .request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") @@ -129,21 +125,26 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(200 == response.getStatus) } + test("refresh deny users of the kyuubi server") { + var response = webTarget.path("api/v1/admin/refresh/deny_users") + .request() + .post(null) + assert(405 == response.getStatus) + + response = webTarget.path("api/v1/admin/refresh/deny_users") + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .post(null) + assert(200 == response.getStatus) + } + test("list/close sessions") { - val requestObj = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue").asJava) + val requestObj = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) var response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(requestObj, 
MediaType.APPLICATION_JSON_TYPE)) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") - // get session list var response2 = webTarget.path("api/v1/admin/sessions").request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") @@ -169,6 +170,73 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(sessions2.isEmpty) } + test("list sessions/operations with filter") { + fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "admin", + "123456", + "localhost", + Map("testConfig" -> "testValue")) + + fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "admin", + "123456", + "localhost", + Map("testConfig" -> "testValue")) + + fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "test_user_1", + "xxxxxx", + "localhost", + Map("testConfig" -> "testValue")) + + val sessionHandle = fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "test_user_2", + "xxxxxx", + "localhost", + Map("testConfig" -> "testValue")) + + // list sessions + var response = webTarget.path("api/v1/admin/sessions") + .queryParam("users", "admin") + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get() + var sessions = response.readEntity(classOf[Seq[SessionData]]) + assert(200 == response.getStatus) + assert(sessions.size == 2) + + response = webTarget.path("api/v1/admin/sessions") + .queryParam("users", "test_user_1,test_user_2") + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get() + sessions = response.readEntity(classOf[Seq[SessionData]]) + assert(200 == response.getStatus) + assert(sessions.size == 2) + + // list operations + response = webTarget.path("api/v1/admin/operations") + .queryParam("users", "test_user_1,test_user_2") + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get() + var operations = response.readEntity(classOf[Seq[OperationData]]) + assert(operations.size == 2) + + response = webTarget.path("api/v1/admin/operations") + .queryParam("sessionHandle", sessionHandle.identifier) + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get() + operations = response.readEntity(classOf[Seq[OperationData]]) + assert(200 == response.getStatus) + assert(operations.size == 1) + } + test("list/close operations") { val sessionHandle = fe.be.openSession( HIVE_CLI_SERVICE_PROTOCOL_V2, @@ -178,20 +246,14 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { Map("testConfig" -> "testValue")) val operation = fe.be.getCatalogs(sessionHandle) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") - // list operations var response = webTarget.path("api/v1/admin/operations").request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .get() assert(200 == response.getStatus) - var operations = response.readEntity(new GenericType[Seq[KyuubiOperationEvent]]() {}) + var operations = response.readEntity(new GenericType[Seq[OperationData]]() {}) assert(operations.nonEmpty) - assert(operations.map(op => op.statementId).contains(operation.identifier.toString)) + assert(operations.map(op => op.getIdentifier).contains(operation.identifier.toString)) // close operation response = webTarget.path(s"api/v1/admin/operations/${operation.identifier}").request() @@ -203,8 +265,8 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { response = 
webTarget.path("api/v1/admin/operations").request() .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .get() - operations = response.readEntity(new GenericType[Seq[KyuubiOperationEvent]]() {}) - assert(!operations.map(op => op.statementId).contains(operation.identifier.toString)) + operations = response.readEntity(new GenericType[Seq[OperationData]]() {}) + assert(!operations.map(op => op.getIdentifier).contains(operation.identifier.toString)) } test("delete engine - user share level") { @@ -214,7 +276,10 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) - val engine = new EngineRef(conf.clone, Utils.currentUser, "grp", id, null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_USER_SPARK_SQL", @@ -227,11 +292,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(client.pathExists(engineSpace)) assert(client.getChildren(engineSpace).size == 1) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") val response = webTarget.path("api/v1/admin/engine") .queryParam("sharelevel", "USER") .queryParam("type", "spark_sql") @@ -242,13 +302,61 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(200 == response.getStatus) assert(client.pathExists(engineSpace)) eventually(timeout(5.seconds), interval(100.milliseconds)) { - assert(client.getChildren(engineSpace).size == 0, s"refId same with $id?") + assert(client.getChildren(engineSpace).isEmpty, s"refId same with $id?") } // kill the engine application - engineMgr.killApplication(None, id) + engineMgr.killApplication(ApplicationManagerInfo(None), id) eventually(timeout(30.seconds), interval(100.milliseconds)) { - assert(engineMgr.getApplicationInfo(None, id).exists(_.state == ApplicationState.NOT_FOUND)) + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) + } + } + } + + test("delete engine - group share level") { + val id = UUID.randomUUID().toString + conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, GROUP.toString) + conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) + conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") + conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) + + val engineSpace = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_GROUP_SPARK_SQL", + fe.asInstanceOf[KyuubiRestFrontendService].sessionManager.groupProvider.primaryGroup( + Utils.currentUser, + null), + "default") + + withDiscoveryClient(conf) { client => + engine.getOrCreate(client) + + assert(client.pathExists(engineSpace)) + assert(client.getChildren(engineSpace).size == 1) + + val response = webTarget.path("api/v1/admin/engine") + .queryParam("sharelevel", "GROUP") + .queryParam("type", "spark_sql") + .request(MediaType.APPLICATION_JSON_TYPE) + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .delete() + + assert(200 == 
response.getStatus) + assert(client.pathExists(engineSpace)) + eventually(timeout(5.seconds), interval(100.milliseconds)) { + assert(client.getChildren(engineSpace).isEmpty, s"refId same with $id?") + } + + // kill the engine application + engineMgr.killApplication(ApplicationManagerInfo(None), id) + eventually(timeout(30.seconds), interval(100.milliseconds)) { + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) } } } @@ -259,9 +367,11 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") val id = UUID.randomUUID().toString - val engine = new EngineRef(conf.clone, Utils.currentUser, "grp", id, null) + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", Utils.currentUser, @@ -273,11 +383,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(client.pathExists(engineSpace)) assert(client.getChildren(engineSpace).size == 1) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") val response = webTarget.path("api/v1/admin/engine") .queryParam("sharelevel", "connection") .queryParam("type", "spark_sql") @@ -297,7 +402,10 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) - val engine = new EngineRef(conf.clone, Utils.currentUser, id, "grp", null) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_USER_SPARK_SQL", @@ -310,11 +418,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(client.pathExists(engineSpace)) assert(client.getChildren(engineSpace).size == 1) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") val response = webTarget.path("api/v1/admin/engine") .queryParam("type", "spark_sql") .request(MediaType.APPLICATION_JSON_TYPE) @@ -329,9 +432,57 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(engines(0).getSubdomain == "default") // kill the engine application - engineMgr.killApplication(None, id) + engineMgr.killApplication(ApplicationManagerInfo(None), id) eventually(timeout(30.seconds), interval(100.milliseconds)) { - assert(engineMgr.getApplicationInfo(None, id).exists(_.state == ApplicationState.NOT_FOUND)) + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) + } + } + } + + test("list engine - group share level") { + val id = UUID.randomUUID().toString + conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, GROUP.toString) + conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) + conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") + 
conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) + + val engineSpace = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_GROUP_SPARK_SQL", + fe.asInstanceOf[KyuubiRestFrontendService].sessionManager.groupProvider.primaryGroup( + Utils.currentUser, + null), + "") + + withDiscoveryClient(conf) { client => + engine.getOrCreate(client) + + assert(client.pathExists(engineSpace)) + assert(client.getChildren(engineSpace).size == 1) + + val response = webTarget.path("api/v1/admin/engine") + .queryParam("type", "spark_sql") + .request(MediaType.APPLICATION_JSON_TYPE) + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get + + assert(200 == response.getStatus) + val engines = response.readEntity(new GenericType[Seq[Engine]]() {}) + assert(engines.size == 1) + assert(engines(0).getEngineType == "SPARK_SQL") + assert(engines(0).getSharelevel == "GROUP") + assert(engines(0).getSubdomain == "default") + + // kill the engine application + engineMgr.killApplication(ApplicationManagerInfo(None), id) + eventually(timeout(30.seconds), interval(100.milliseconds)) { + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) } } } @@ -342,6 +493,7 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", @@ -349,14 +501,16 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { "") val id1 = UUID.randomUUID().toString - val engine1 = new EngineRef(conf.clone, Utils.currentUser, "grp", id1, null) + val engine1 = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id1, null) val engineSpace1 = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", Utils.currentUser, id1) val id2 = UUID.randomUUID().toString - val engine2 = new EngineRef(conf.clone, Utils.currentUser, "grp", id2, null) + val engine2 = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id2, null) val engineSpace2 = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", Utils.currentUser, @@ -371,11 +525,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(client.pathExists(engineSpace1)) assert(client.pathExists(engineSpace2)) - val adminUser = Utils.currentUser - val encodeAuthorization = new String( - Base64.getEncoder.encode( - s"$adminUser:".getBytes()), - "UTF-8") val response = webTarget.path("api/v1/admin/engine") .queryParam("type", "spark_sql") .request(MediaType.APPLICATION_JSON_TYPE) @@ -396,15 +545,197 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(result1.size == 1) // kill the engine application - engineMgr.killApplication(None, id1) - engineMgr.killApplication(None, id2) + engineMgr.killApplication(ApplicationManagerInfo(None), id1) + engineMgr.killApplication(ApplicationManagerInfo(None), id2) eventually(timeout(30.seconds), interval(100.milliseconds)) { - assert(engineMgr.getApplicationInfo(None, id1) + 
assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id1) .exists(_.state == ApplicationState.NOT_FOUND)) - assert(engineMgr.getApplicationInfo(None, id2) + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id2) .exists(_.state == ApplicationState.NOT_FOUND)) } } } + test("list server") { + // Mock Kyuubi Server + val serverDiscovery = mock[ServiceDiscovery] + lenient.when(serverDiscovery.fe).thenReturn(fe) + val namespace = conf.get(HighAvailabilityConf.HA_NAMESPACE) + withDiscoveryClient(conf) { client => + client.registerService(conf, namespace, serverDiscovery) + + val response = webTarget.path("api/v1/admin/server") + .request() + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get + + assert(200 == response.getStatus) + val result = response.readEntity(new GenericType[Seq[ServerData]]() {}) + assert(result.size == 1) + val testServer = result.head + val restFrontendService = fe.asInstanceOf[KyuubiRestFrontendService] + + assert(namespace.equals(testServer.getNamespace.replaceFirst("/", ""))) + assert(restFrontendService.host.equals(testServer.getHost)) + assert(restFrontendService.connectionUrl.equals(testServer.getInstance())) + assert(!testServer.getAttributes.isEmpty) + val attributes = testServer.getAttributes + assert(attributes.containsKey("serviceUri") && + attributes.get("serviceUri").equals(fe.connectionUrl)) + assert(attributes.containsKey("version")) + assert(attributes.containsKey("sequence")) + assert("Running".equals(testServer.getStatus)) + } + } + + test("list all engine - user share level") { + val id = UUID.randomUUID().toString + conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, USER.toString) + conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) + conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") + conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) + + val engineSpace = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_USER_SPARK_SQL", + Utils.currentUser, + "") + + withDiscoveryClient(conf) { client => + engine.getOrCreate(client) + + assert(client.pathExists(engineSpace)) + assert(client.getChildren(engineSpace).size == 1) + + val response = webTarget.path("api/v1/admin/engine") + .queryParam("all", "true") + .request(MediaType.APPLICATION_JSON_TYPE) + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get + + assert(200 == response.getStatus) + val engines = response.readEntity(new GenericType[Seq[Engine]]() {}) + assert(engines.size == 1) + assert(engines(0).getEngineType == "SPARK_SQL") + assert(engines(0).getSharelevel == "USER") + assert(engines(0).getSubdomain == "default") + + // kill the engine application + engineMgr.killApplication(ApplicationManagerInfo(None), id) + eventually(timeout(30.seconds), interval(100.milliseconds)) { + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) + } + } + } + + test("list all engines - group share level") { + val id = UUID.randomUUID().toString + conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, GROUP.toString) + conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) + conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") + conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") 
+ + val engine = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id, null) + + val engineSpace = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_GROUP_SPARK_SQL", + fe.asInstanceOf[KyuubiRestFrontendService].sessionManager.groupProvider.primaryGroup( + Utils.currentUser, + null), + "") + + withDiscoveryClient(conf) { client => + engine.getOrCreate(client) + + assert(client.pathExists(engineSpace)) + assert(client.getChildren(engineSpace).size == 1) + + val response = webTarget.path("api/v1/admin/engine") + .queryParam("all", "true") + .request(MediaType.APPLICATION_JSON_TYPE) + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get + + assert(200 == response.getStatus) + val engines = response.readEntity(new GenericType[Seq[Engine]]() {}) + assert(engines.size == 1) + assert(engines(0).getEngineType == "SPARK_SQL") + assert(engines(0).getSharelevel == "GROUP") + assert(engines(0).getSubdomain == "default") + + // kill the engine application + engineMgr.killApplication(ApplicationManagerInfo(None), id) + eventually(timeout(30.seconds), interval(100.milliseconds)) { + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id).exists( + _.state == ApplicationState.NOT_FOUND)) + } + } + } + + test("list all engines - connection share level") { + conf.set(KyuubiConf.ENGINE_SHARE_LEVEL, CONNECTION.toString) + conf.set(KyuubiConf.ENGINE_TYPE, SPARK_SQL.toString) + conf.set(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0) + conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") + conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + + val engineSpace = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", + Utils.currentUser, + "") + + val id1 = UUID.randomUUID().toString + val engine1 = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id1, null) + val engineSpace1 = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", + Utils.currentUser, + id1) + + val id2 = UUID.randomUUID().toString + val engine2 = + new EngineRef(conf.clone, Utils.currentUser, PluginLoader.loadGroupProvider(conf), id2, null) + val engineSpace2 = DiscoveryPaths.makePath( + s"kyuubi_test_${KYUUBI_VERSION}_CONNECTION_SPARK_SQL", + Utils.currentUser, + id2) + + withDiscoveryClient(conf) { client => + engine1.getOrCreate(client) + engine2.getOrCreate(client) + + assert(client.pathExists(engineSpace)) + assert(client.getChildren(engineSpace).size == 2) + assert(client.pathExists(engineSpace1)) + assert(client.pathExists(engineSpace2)) + + val response = webTarget.path("api/v1/admin/engine") + .queryParam("all", "true") + .request(MediaType.APPLICATION_JSON_TYPE) + .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") + .get + assert(200 == response.getStatus) + val result = response.readEntity(new GenericType[Seq[Engine]]() {}) + assert(result.size == 2) + + // kill the engine application + engineMgr.killApplication(ApplicationManagerInfo(None), id1) + engineMgr.killApplication(ApplicationManagerInfo(None), id2) + eventually(timeout(30.seconds), interval(100.milliseconds)) { + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id1) + .exists(_.state == ApplicationState.NOT_FOUND)) + assert(engineMgr.getApplicationInfo(ApplicationManagerInfo(None), id2) + .exists(_.state == ApplicationState.NOT_FOUND)) + } + } + } } diff --git 
a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/BatchesResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/BatchesResourceSuite.scala index 055496ff322..7270f68d6b7 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/BatchesResourceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/BatchesResourceSuite.scala @@ -21,7 +21,7 @@ import java.net.InetAddress import java.nio.file.Paths import java.util.{Base64, UUID} import javax.ws.rs.client.Entity -import javax.ws.rs.core.MediaType +import javax.ws.rs.core.{MediaType, Response} import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer @@ -31,41 +31,82 @@ import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.glassfish.jersey.media.multipart.FormDataMultiPart import org.glassfish.jersey.media.multipart.file.FileDataBodyPart -import org.apache.kyuubi.{BatchTestHelper, KyuubiFunSuite, RestFrontendTestHelper} +import org.apache.kyuubi.{BatchTestHelper, KyuubiFunSuite, RestFrontendTestHelper, Utils} import org.apache.kyuubi.client.api.v1.dto._ import org.apache.kyuubi.client.util.BatchUtils import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ -import org.apache.kyuubi.engine.{ApplicationInfo, KyuubiApplicationManager} +import org.apache.kyuubi.engine.{ApplicationInfo, ApplicationManagerInfo, KyuubiApplicationManager} import org.apache.kyuubi.engine.spark.SparkBatchProcessBuilder import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} import org.apache.kyuubi.operation.{BatchJobSubmission, OperationState} import org.apache.kyuubi.operation.OperationState.OperationState -import org.apache.kyuubi.server.KyuubiRestFrontendService +import org.apache.kyuubi.server.{KyuubiBatchService, KyuubiRestFrontendService} import org.apache.kyuubi.server.http.authentication.AuthenticationHandler.AUTHORIZATION_HEADER -import org.apache.kyuubi.server.metadata.api.Metadata -import org.apache.kyuubi.service.authentication.KyuubiAuthenticationFactory -import org.apache.kyuubi.session.{KyuubiBatchSessionImpl, KyuubiSessionManager, SessionHandle, SessionType} - -class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper with BatchTestHelper { - override protected lazy val conf: KyuubiConf = KyuubiConf() - .set(KyuubiConf.ENGINE_SECURITY_ENABLED, true) - .set(KyuubiConf.ENGINE_SECURITY_SECRET_PROVIDER, "simple") - .set(KyuubiConf.SIMPLE_SECURITY_SECRET_PROVIDER_PROVIDER_SECRET, "ENGINE____SECRET") - .set( - KyuubiConf.SESSION_LOCAL_DIR_ALLOW_LIST, - Seq(Paths.get(sparkBatchTestResource.get).getParent.toString)) +import org.apache.kyuubi.server.metadata.api.{Metadata, MetadataFilter} +import org.apache.kyuubi.service.authentication.{InternalSecurityAccessor, KyuubiAuthenticationFactory} +import org.apache.kyuubi.session.{KyuubiBatchSession, KyuubiSessionManager, SessionHandle, SessionType} + +class BatchesV1ResourceSuite extends BatchesResourceSuiteBase { + override def batchVersion: String = "1" + + override def customConf: Map[String, String] = Map.empty +} + +class BatchesV2ResourceSuite extends BatchesResourceSuiteBase { + override def batchVersion: String = "2" + + override def customConf: Map[String, String] = Map( + KyuubiConf.METADATA_REQUEST_ASYNC_RETRY_ENABLED.key -> "false", + KyuubiConf.BATCH_SUBMITTER_ENABLED.key -> "true") + + override def afterEach(): Unit = { + val sessionManager = 
fe.be.sessionManager.asInstanceOf[KyuubiSessionManager] + val batchService = server.getServices.collectFirst { case b: KyuubiBatchService => b }.get + sessionManager.getBatchesFromMetadataStore(MetadataFilter(), 0, Int.MaxValue) + .foreach { batch => batchService.cancelUnscheduledBatch(batch.getId) } + super.afterEach() + sessionManager.allSessions().foreach { session => + Utils.tryLogNonFatalError { sessionManager.closeSession(session.handle) } + } + } +} + +abstract class BatchesResourceSuiteBase extends KyuubiFunSuite + with RestFrontendTestHelper + with BatchTestHelper { + + def batchVersion: String + + def customConf: Map[String, String] + + override protected lazy val conf: KyuubiConf = { + val kyuubiConf = KyuubiConf() + .set(KyuubiConf.ENGINE_SECURITY_ENABLED, true) + .set(KyuubiConf.ENGINE_SECURITY_SECRET_PROVIDER, "simple") + .set(KyuubiConf.SIMPLE_SECURITY_SECRET_PROVIDER_PROVIDER_SECRET, "ENGINE____SECRET") + .set(KyuubiConf.BATCH_IMPL_VERSION, batchVersion) + .set( + KyuubiConf.SESSION_LOCAL_DIR_ALLOW_LIST, + Set(Paths.get(sparkBatchTestResource.get).getParent.toString)) + customConf.foreach { case (k, v) => kyuubiConf.set(k, v) } + kyuubiConf + } + + override def beforeAll(): Unit = { + super.beforeAll() + InternalSecurityAccessor.initialize(conf, true) + } override def afterEach(): Unit = { val sessionManager = fe.be.sessionManager.asInstanceOf[KyuubiSessionManager] sessionManager.allSessions().foreach { session => sessionManager.closeSession(session.handle) } - sessionManager.getBatchesFromMetadataStore(null, null, null, 0, 0, 0, Int.MaxValue).foreach { - batch => - sessionManager.applicationManager.killApplication(None, batch.getId) - sessionManager.cleanupMetadata(batch.getId) + sessionManager.getBatchesFromMetadataStore(MetadataFilter(), 0, Int.MaxValue).foreach { batch => + sessionManager.applicationManager.killApplication(ApplicationManagerInfo(None), batch.getId) + sessionManager.cleanupMetadata(batch.getId) } } @@ -75,9 +116,18 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val response = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE)) - assert(200 == response.getStatus) + assert(response.getStatus === 200) var batch = response.readEntity(classOf[Batch]) - assert(batch.getKyuubiInstance === fe.connectionUrl) + batchVersion match { + case "1" => + assert(batch.getKyuubiInstance === fe.connectionUrl) + case "2" if batch.getState === "INITIALIZED" => + assert(batch.getKyuubiInstance === null) + case "2" if batch.getState === "PENDING" => // batch picked by BatchService + assert(batch.getKyuubiInstance === fe.connectionUrl) + case _ => + fail(s"unexpected batch info, version: $batchVersion state: ${batch.getState}") + } assert(batch.getBatchType === "SPARK") assert(batch.getName === sparkBatchTestAppName) assert(batch.getCreateTime > 0) @@ -89,16 +139,25 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val proxyUserResponse = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(proxyUserRequest, MediaType.APPLICATION_JSON_TYPE)) - assert(405 == proxyUserResponse.getStatus) + assert(proxyUserResponse.getStatus === 405) var errorMessage = "Failed to validate proxy privilege of anonymous for root" assert(proxyUserResponse.readEntity(classOf[String]).contains(errorMessage)) - var getBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + var getBatchResponse 
= webTarget.path(s"api/v1/batches/${batch.getId}") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(200 == getBatchResponse.getStatus) + assert(getBatchResponse.getStatus === 200) batch = getBatchResponse.readEntity(classOf[Batch]) - assert(batch.getKyuubiInstance === fe.connectionUrl) + batchVersion match { + case "1" => + assert(batch.getKyuubiInstance === fe.connectionUrl) + case "2" if batch.getState === "INITIALIZED" => + assert(batch.getKyuubiInstance === null) + case "2" if batch.getState === "PENDING" => // batch picked by BatchService + assert(batch.getKyuubiInstance === fe.connectionUrl) + case _ => + fail(s"unexpected batch info, version: $batchVersion state: ${batch.getState}") + } assert(batch.getBatchType === "SPARK") assert(batch.getName === sparkBatchTestAppName) assert(batch.getCreateTime > 0) @@ -111,22 +170,26 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi getBatchResponse = webTarget.path(s"api/v1/batches/invalidBatchId") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(404 == getBatchResponse.getStatus) + assert(getBatchResponse.getStatus === 404) // get batch log - var logResponse = webTarget.path(s"api/v1/batches/${batch.getId()}/localLog") - .queryParam("from", "0") - .queryParam("size", "1") - .request(MediaType.APPLICATION_JSON_TYPE) - .get() - var log = logResponse.readEntity(classOf[OperationLog]) + var logResponse: Response = null + var log: OperationLog = null + eventually(timeout(10.seconds), interval(1.seconds)) { + logResponse = webTarget.path(s"api/v1/batches/${batch.getId}/localLog") + .queryParam("from", "0") + .queryParam("size", "1") + .request(MediaType.APPLICATION_JSON_TYPE) + .get() + log = logResponse.readEntity(classOf[OperationLog]) + assert(log.getRowCount === 1) + } val head = log.getLogRowSet.asScala.head - assert(log.getRowCount == 1) val logs = new ArrayBuffer[String] logs.append(head) eventually(timeout(10.seconds), interval(1.seconds)) { - logResponse = webTarget.path(s"api/v1/batches/${batch.getId()}/localLog") + logResponse = webTarget.path(s"api/v1/batches/${batch.getId}/localLog") .queryParam("from", "-1") .queryParam("size", "100") .request(MediaType.APPLICATION_JSON_TYPE) @@ -138,67 +201,67 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi // check both kyuubi log and engine log assert( - logs.exists(_.contains("/bin/spark-submit")) && logs.exists( - _.contains(s"SparkContext: Submitted application: $sparkBatchTestAppName"))) + logs.exists(_.contains("/bin/spark-submit")) && + logs.exists(_.contains(s"SparkContext: Submitted application: $sparkBatchTestAppName"))) } // invalid user name val encodeAuthorization = - new String(Base64.getEncoder.encode(batch.getId().getBytes()), "UTF-8") - var deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + new String(Base64.getEncoder.encode(batch.getId.getBytes()), "UTF-8") + var deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId}") .request(MediaType.APPLICATION_JSON_TYPE) .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .delete() - assert(405 == deleteBatchResponse.getStatus) - errorMessage = s"${batch.getId()} is not allowed to close the session belong to anonymous" + assert(deleteBatchResponse.getStatus === 405) + errorMessage = s"${batch.getId} is not allowed to close the session belong to anonymous" assert(deleteBatchResponse.readEntity(classOf[String]).contains(errorMessage)) // invalid batchId deleteBatchResponse = 
webTarget.path(s"api/v1/batches/notValidUUID") .request(MediaType.APPLICATION_JSON_TYPE) .delete() - assert(404 == deleteBatchResponse.getStatus) + assert(deleteBatchResponse.getStatus === 404) // non-existed batch session deleteBatchResponse = webTarget.path(s"api/v1/batches/${UUID.randomUUID().toString}") .request(MediaType.APPLICATION_JSON_TYPE) .delete() - assert(404 == deleteBatchResponse.getStatus) + assert(deleteBatchResponse.getStatus === 404) // invalid proxy user - deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId}") .queryParam("hive.server2.proxy.user", "invalidProxy") .request(MediaType.APPLICATION_JSON_TYPE) .delete() - assert(405 == deleteBatchResponse.getStatus) + assert(deleteBatchResponse.getStatus === 405) errorMessage = "Failed to validate proxy privilege of anonymous for invalidProxy" assert(deleteBatchResponse.readEntity(classOf[String]).contains(errorMessage)) // check close batch session - deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId}") .request(MediaType.APPLICATION_JSON_TYPE) .delete() - assert(200 == deleteBatchResponse.getStatus) + assert(deleteBatchResponse.getStatus === 200) val closeBatchResponse = deleteBatchResponse.readEntity(classOf[CloseBatchResponse]) // check state after close batch session - getBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + getBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId}") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(200 == getBatchResponse.getStatus) + assert(getBatchResponse.getStatus === 200) batch = getBatchResponse.readEntity(classOf[Batch]) - assert(batch.getId == batch.getId()) + assert(batch.getId === batch.getId) if (closeBatchResponse.isSuccess) { - assert(batch.getState == "CANCELED") + assert(batch.getState === "CANCELED") } else { assert(batch.getState != "CANCELED") } // close the closed batch session - deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId()}") + deleteBatchResponse = webTarget.path(s"api/v1/batches/${batch.getId}") .request(MediaType.APPLICATION_JSON_TYPE) .delete() - assert(200 == deleteBatchResponse.getStatus) + assert(deleteBatchResponse.getStatus === 200) assert(!deleteBatchResponse.readEntity(classOf[CloseBatchResponse]).isSuccess) } @@ -213,17 +276,36 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val response = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON) .post(Entity.entity(multipart, MediaType.MULTIPART_FORM_DATA)) - assert(200 == response.getStatus) + assert(response.getStatus === 200) val batch = response.readEntity(classOf[Batch]) - assert(batch.getKyuubiInstance === fe.connectionUrl) + batchVersion match { + case "1" => + assert(batch.getKyuubiInstance === fe.connectionUrl) + case "2" if batch.getState === "INITIALIZED" => + assert(batch.getKyuubiInstance === null) + case "2" if batch.getState === "PENDING" => // batch picked by BatchService + assert(batch.getKyuubiInstance === fe.connectionUrl) + case _ => + fail(s"unexpected batch info, version: $batchVersion state: ${batch.getState}") + } assert(batch.getBatchType === "SPARK") assert(batch.getName === sparkBatchTestAppName) assert(batch.getCreateTime > 0) assert(batch.getEndTime === 0) - webTarget.path(s"api/v1/batches/${batch.getId()}").request( - MediaType.APPLICATION_JSON_TYPE).delete() - eventually(timeout(3.seconds)) { 
+ // wait for the batch to be scheduled + eventually(timeout(5.seconds), interval(200.millis)) { + val resp = webTarget.path(s"api/v1/batches/${batch.getId}") + .request(MediaType.APPLICATION_JSON_TYPE) + .get() + val batchState = resp.readEntity(classOf[Batch]).getState + assert(batchState === "PENDING" || batchState === "RUNNING") + } + + webTarget.path(s"api/v1/batches/${batch.getId}") + .request(MediaType.APPLICATION_JSON_TYPE) + .delete() + eventually(timeout(5.seconds), interval(200.millis)) { assert(KyuubiApplicationManager.uploadWorkDir.toFile.listFiles().isEmpty) } } @@ -237,14 +319,14 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val resp1 = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(reqObj, MediaType.APPLICATION_JSON_TYPE)) - assert(200 == resp1.getStatus) + assert(resp1.getStatus === 200) val batch1 = resp1.readEntity(classOf[Batch]) assert(batch1.getId === batchId) val resp2 = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(reqObj, MediaType.APPLICATION_JSON_TYPE)) - assert(200 == resp2.getStatus) + assert(resp2.getStatus === 200) val batch2 = resp2.readEntity(classOf[Batch]) assert(batch2.getId === batchId) @@ -268,20 +350,20 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response.getStatus == 200) + assert(response.getStatus === 200) val getBatchListResponse = response.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse.getBatches.isEmpty && getBatchListResponse.getTotal == 0) + assert(getBatchListResponse.getBatches.isEmpty && getBatchListResponse.getTotal === 0) sessionManager.openBatchSession( "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", sparkBatchTestResource.get, "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) sessionManager.openSession( TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11, "", @@ -298,22 +380,22 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", sparkBatchTestResource.get, "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) sessionManager.openBatchSession( "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", sparkBatchTestResource.get, "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) val response2 = webTarget.path("api/v1/batches") .queryParam("batchType", "spark") @@ -322,10 +404,10 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response2.getStatus == 200) + assert(response2.getStatus === 200) val getBatchListResponse2 = response2.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse2.getTotal == 2) + assert(getBatchListResponse2.getTotal === 2) val response3 = webTarget.path("api/v1/batches") .queryParam("batchType", "spark") @@ -334,10 +416,10 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response3.getStatus == 200) + assert(response3.getStatus === 200)
val getBatchListResponse3 = response3.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse3.getTotal == 1) + assert(getBatchListResponse3.getTotal === 1) val response4 = webTarget.path("api/v1/batches") .queryParam("batchType", "spark") @@ -346,9 +428,9 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response4.getStatus == 200) + assert(response4.getStatus === 200) val getBatchListResponse4 = response4.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse4.getBatches.isEmpty && getBatchListResponse4.getTotal == 0) + assert(getBatchListResponse4.getBatches.isEmpty && getBatchListResponse4.getTotal === 0) val response5 = webTarget.path("api/v1/batches") .queryParam("batchType", "mock") @@ -357,10 +439,10 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response5.getStatus == 200) + assert(response5.getStatus === 200) val getBatchListResponse5 = response5.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse5.getTotal == 0) + assert(getBatchListResponse5.getTotal === 0) // TODO add more test when add more batchType val response6 = webTarget.path("api/v1/batches") @@ -369,10 +451,10 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(response6.getStatus == 200) + assert(response6.getStatus === 200) val getBatchListResponse6 = response6.readEntity(classOf[GetBatchesResponse]) - assert(getBatchListResponse6.getTotal == 1) - sessionManager.allSessions().map(_.close()) + assert(getBatchListResponse6.getTotal === 1) + sessionManager.allSessions().foreach(_.close()) val queryCreateTime = System.currentTimeMillis() val response7 = webTarget.path("api/v1/batches") @@ -412,7 +494,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val response = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(req, MediaType.APPLICATION_JSON_TYPE)) - assert(500 == response.getStatus) + assert(response.getStatus === 500) assert(response.readEntity(classOf[String]).contains(msg)) } @@ -425,7 +507,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val response = webTarget.path(s"api/v1/batches/$batchId") .request(MediaType.APPLICATION_JSON_TYPE) .get - assert(404 == response.getStatus) + assert(response.getStatus === 404) assert(response.readEntity(classOf[String]).contains(msg)) } } @@ -434,7 +516,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val sessionManager = fe.be.sessionManager.asInstanceOf[KyuubiSessionManager] val kyuubiInstance = fe.connectionUrl - assert(sessionManager.getOpenSessionCount == 0) + assert(sessionManager.getOpenSessionCount === 0) val batchId1 = UUID.randomUUID().toString val batchId2 = UUID.randomUUID().toString @@ -460,8 +542,8 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi sessionManager.insertMetadata(batchMetadata) sessionManager.insertMetadata(batchMetadata2) - assert(sessionManager.getBatchFromMetadataStore(batchId1).getState.equals("PENDING")) - assert(sessionManager.getBatchFromMetadataStore(batchId2).getState.equals("PENDING")) + assert(sessionManager.getBatchFromMetadataStore(batchId1).map(_.getState).contains("PENDING")) + 
assert(sessionManager.getBatchFromMetadataStore(batchId2).map(_.getState).contains("PENDING")) val sparkBatchProcessBuilder = new SparkBatchProcessBuilder( "kyuubi", @@ -477,7 +559,8 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi var applicationStatus: Option[ApplicationInfo] = None eventually(timeout(5.seconds)) { - applicationStatus = sessionManager.applicationManager.getApplicationInfo(None, batchId2) + applicationStatus = + sessionManager.applicationManager.getApplicationInfo(ApplicationManagerInfo(None), batchId2) assert(applicationStatus.isDefined) } @@ -493,12 +576,12 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi val restFe = fe.asInstanceOf[KyuubiRestFrontendService] restFe.recoverBatchSessions() - assert(sessionManager.getOpenSessionCount == 2) + assert(sessionManager.getOpenSessionCount === 2) val sessionHandle1 = SessionHandle.fromUUID(batchId1) val sessionHandle2 = SessionHandle.fromUUID(batchId2) - val session1 = sessionManager.getSession(sessionHandle1).asInstanceOf[KyuubiBatchSessionImpl] - val session2 = sessionManager.getSession(sessionHandle2).asInstanceOf[KyuubiBatchSessionImpl] + val session1 = sessionManager.getSession(sessionHandle1).asInstanceOf[KyuubiBatchSession] + val session2 = sessionManager.getSession(sessionHandle2).asInstanceOf[KyuubiBatchSession] assert(session1.createTime === batchMetadata.createTime) assert(session2.createTime === batchMetadata2.createTime) @@ -513,13 +596,9 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi } assert(sessionManager.getBatchesFromMetadataStore( - "SPARK", - null, - null, - 0, + MetadataFilter(engineType = "SPARK"), 0, - 0, - Int.MaxValue).size == 2) + Int.MaxValue).size === 2) } test("get local log internal redirection") { @@ -544,8 +623,17 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .queryParam("size", "1") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(logResponse.getStatus == 404) - assert(logResponse.readEntity(classOf[String]).contains("No local log found")) + batchVersion match { + case "1" => + assert(logResponse.getStatus === 404) + assert(logResponse.readEntity(classOf[String]).contains("No local log found")) + case "2" => + assert(logResponse.getStatus === 200) + assert(logResponse.readEntity(classOf[String]).contains( + s"Batch ${metadata.identifier} is waiting for submitting")) + case _ => + fail(s"unexpected batch version: $batchVersion") + } // get local batch log that is not existing logResponse = webTarget.path(s"api/v1/batches/${UUID.randomUUID.toString}/localLog") @@ -553,7 +641,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .queryParam("size", "1") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(logResponse.getStatus == 404) + assert(logResponse.getStatus === 404) assert(logResponse.readEntity(classOf[String]).contains("Invalid batchId")) val metadata2 = metadata.copy( @@ -567,7 +655,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .queryParam("size", "1") .request(MediaType.APPLICATION_JSON_TYPE) .get() - assert(logResponse.getStatus == 500) + assert(logResponse.getStatus === 500) assert(logResponse.readEntity(classOf[String]).contains( s"Api request failed for http://${metadata2.kyuubiInstance}")) } @@ -596,7 +684,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) 
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .delete() - assert(deleteResp.getStatus == 200) + assert(deleteResp.getStatus === 200) assert(!deleteResp.readEntity(classOf[CloseBatchResponse]).isSuccess) // delete batch that is not existing @@ -604,7 +692,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .delete() - assert(deleteResp.getStatus == 404) + assert(deleteResp.getStatus === 404) assert(deleteResp.readEntity(classOf[String]).contains("Invalid batchId:")) val metadata2 = metadata.copy( @@ -617,7 +705,7 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization") .delete() - assert(deleteResp.getStatus == 200) + assert(deleteResp.getStatus === 200) assert(deleteResp.readEntity(classOf[CloseBatchResponse]).getMsg.contains( s"Api request failed for http://${metadata2.kyuubiInstance}")) } @@ -632,10 +720,12 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .request(MediaType.APPLICATION_JSON_TYPE) .header(conf.get(FRONTEND_PROXY_HTTP_CLIENT_IP_HEADER), realClientIp) .post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE)) - assert(200 == response.getStatus) + assert(response.getStatus === 200) val batch = response.readEntity(classOf[Batch]) - val batchSession = sessionManager.getBatchSessionImpl(SessionHandle.fromUUID(batch.getId)) - assert(batchSession.ipAddress === realClientIp) + eventually(timeout(10.seconds)) { + val batchSession = sessionManager.getBatchSession(SessionHandle.fromUUID(batch.getId)) + assert(batchSession.map(_.ipAddress).contains(realClientIp)) + } } test("expose the metrics with operation type and current state") { @@ -645,42 +735,47 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi assert(getBatchJobSubmissionStateCounter(OperationState.RUNNING) === 0) } - val originalTerminateCounter = getBatchJobSubmissionStateCounter(OperationState.CANCELED) + - getBatchJobSubmissionStateCounter(OperationState.FINISHED) + - getBatchJobSubmissionStateCounter(OperationState.ERROR) - - val requestObj = newSparkBatchRequest(Map("spark.master" -> "local")) - - val response = webTarget.path("api/v1/batches") - .request(MediaType.APPLICATION_JSON_TYPE) - .post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE)) - assert(200 == response.getStatus) - var batch = response.readEntity(classOf[Batch]) + val originalTerminatedCount = + getBatchJobSubmissionStateCounter(OperationState.CANCELED) + + getBatchJobSubmissionStateCounter(OperationState.FINISHED) + + getBatchJobSubmissionStateCounter(OperationState.ERROR) - assert(getBatchJobSubmissionStateCounter(OperationState.INITIALIZED) + - getBatchJobSubmissionStateCounter(OperationState.PENDING) + - getBatchJobSubmissionStateCounter(OperationState.RUNNING) === 1) + val batchId = UUID.randomUUID().toString + val requestObj = newSparkBatchRequest(Map( + "spark.master" -> "local", + KYUUBI_BATCH_ID_KEY -> batchId)) - while (batch.getState == OperationState.PENDING.toString || - batch.getState == OperationState.RUNNING.toString) { - val deleteResp = webTarget.path(s"api/v1/batches/${batch.getId}") + eventually(timeout(10.seconds)) { + val response = webTarget.path("api/v1/batches") .request(MediaType.APPLICATION_JSON_TYPE) - .delete() - assert(200 == deleteResp.getStatus) + 
.post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE)) + assert(response.getStatus === 200) + val batch = response.readEntity(classOf[Batch]) + assert(batch.getState === OperationState.PENDING.toString || + batch.getState === OperationState.RUNNING.toString) + } - batch = webTarget.path(s"api/v1/batches/${batch.getId}") - .request(MediaType.APPLICATION_JSON_TYPE) - .get().readEntity(classOf[Batch]) + eventually(timeout(10.seconds)) { + assert(getBatchJobSubmissionStateCounter(OperationState.INITIALIZED) + + getBatchJobSubmissionStateCounter(OperationState.PENDING) + + getBatchJobSubmissionStateCounter(OperationState.RUNNING) === 1) } - assert(getBatchJobSubmissionStateCounter(OperationState.INITIALIZED) === 0) - assert(getBatchJobSubmissionStateCounter(OperationState.PENDING) === 0) - assert(getBatchJobSubmissionStateCounter(OperationState.RUNNING) === 0) + val deleteResp = webTarget.path(s"api/v1/batches/$batchId") + .request(MediaType.APPLICATION_JSON_TYPE) + .delete() + assert(deleteResp.getStatus === 200) - val currentTeminateCount = getBatchJobSubmissionStateCounter(OperationState.CANCELED) + + eventually(timeout(10.seconds)) { + assert(getBatchJobSubmissionStateCounter(OperationState.INITIALIZED) === 0) + assert(getBatchJobSubmissionStateCounter(OperationState.PENDING) === 0) + assert(getBatchJobSubmissionStateCounter(OperationState.RUNNING) === 0) + } + + val currentTerminatedCount = getBatchJobSubmissionStateCounter(OperationState.CANCELED) + getBatchJobSubmissionStateCounter(OperationState.FINISHED) + getBatchJobSubmissionStateCounter(OperationState.ERROR) - assert(currentTeminateCount - originalTerminateCounter === 1) + assert(currentTerminatedCount - originalTerminatedCount === 1) } private def getBatchJobSubmissionStateCounter(state: OperationState): Long = { @@ -694,16 +789,45 @@ class BatchesResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper wi .be.sessionManager.asInstanceOf[KyuubiSessionManager] val e = intercept[Exception] { + val conf = Map( + KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString, + "spark.jars" -> "disAllowPath") sessionManager.openBatchSession( "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), - newSparkBatchRequest(Map("spark.jars" -> "disAllowPath"))) + newSparkBatchRequest(conf)) } - val sessionHandleRegex = "\\[[\\S]*\\]".r + val sessionHandleRegex = "\\[\\S*]".r val batchId = sessionHandleRegex.findFirstMatchIn(e.getMessage).get.group(0) - .replaceAll("\\[", "").replaceAll("\\]", "") - assert(sessionManager.getBatchMetadata(batchId).state == "CANCELED") + .replaceAll("\\[", "").replaceAll("]", "") + assert(sessionManager.getBatchMetadata(batchId).map(_.state).contains("CANCELED")) + } + + test("get batch list with batch name filter condition") { + val sessionManager = server.frontendServices.head + .be.sessionManager.asInstanceOf[KyuubiSessionManager] + sessionManager.allSessions().foreach(_.close()) + + val uniqueName = UUID.randomUUID().toString + sessionManager.openBatchSession( + "kyuubi", + "kyuubi", + InetAddress.getLocalHost.getCanonicalHostName, + newBatchRequest( + "spark", + sparkBatchTestResource.get, + "", + uniqueName, + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) + + val response = webTarget.path("api/v1/batches") + .queryParam("batchName", uniqueName) + .request(MediaType.APPLICATION_JSON_TYPE) + .get() + + assert(response.getStatus == 200) + val getBatchListResponse = response.readEntity(classOf[GetBatchesResponse]) + 
assert(getBatchListResponse.getTotal == 1) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/OperationsResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/OperationsResourceSuite.scala index 51701b231a0..72cd4d87db1 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/OperationsResourceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/OperationsResourceSuite.scala @@ -102,6 +102,47 @@ class OperationsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper val logRowSet = response.readEntity(classOf[OperationLog]) assert(logRowSet.getLogRowSet.asScala.exists(_.contains("show tables"))) assert(logRowSet.getRowCount === 10) + + val response2 = webTarget.path( + s"api/v1/operations/$opHandleStr/log") + .queryParam("maxrows", "1000") + .queryParam("fetchorientation", "FETCH_NEXT") + .request(MediaType.APPLICATION_JSON).get() + assert(200 == response2.getStatus) + val logCount = response2.readEntity(classOf[OperationLog]).getRowCount + val totalLogCount = logCount + 10 + assert(logCount > 0) + + val response3 = webTarget.path( + s"api/v1/operations/$opHandleStr/log") + .queryParam("maxrows", "1000") + .request(MediaType.APPLICATION_JSON).get() + assert(200 == response3.getStatus) + assert(response3.readEntity(classOf[OperationLog]).getRowCount == 0) + + val response4 = webTarget.path( + s"api/v1/operations/$opHandleStr/log") + .queryParam("maxrows", "10") + .queryParam("fetchorientation", "FETCH_FIRST") + .request(MediaType.APPLICATION_JSON).get() + assert(200 == response4.getStatus) + assert(response4.readEntity(classOf[OperationLog]).getRowCount == 10) + + val response5 = webTarget.path( + s"api/v1/operations/$opHandleStr/log") + .queryParam("maxrows", "10") + .queryParam("fetchorientation", "FETCH_PRIOR") + .request(MediaType.APPLICATION_JSON).get() + assert(400 == response5.getStatus) + assert(response5.getStatusInfo.getReasonPhrase == "Bad Request") + + val response6 = webTarget.path( + s"api/v1/operations/$opHandleStr/log") + .queryParam("maxrows", "1000") + .queryParam("fetchorientation", "FETCH_NEXT") + .request(MediaType.APPLICATION_JSON).get() + assert(200 == response6.getStatus) + assert(response6.readEntity(classOf[OperationLog]).getRowCount == totalLogCount - 10) } test("test get result row set") { diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/SessionsResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/SessionsResourceSuite.scala index dcd4d590463..b58e87bc8c2 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/SessionsResourceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/SessionsResourceSuite.scala @@ -19,7 +19,7 @@ package org.apache.kyuubi.server.api.v1 import java.nio.charset.StandardCharsets import java.util -import java.util.Base64 +import java.util.{Base64, Collections} import javax.ws.rs.client.Entity import javax.ws.rs.core.{GenericType, MediaType, Response} @@ -28,11 +28,11 @@ import scala.collection.JavaConverters._ import org.scalatest.time.SpanSugar.convertIntToGrainOfTime import org.apache.kyuubi.{KyuubiFunSuite, RestFrontendTestHelper} -import org.apache.kyuubi.client.api.v1.dto._ +import org.apache.kyuubi.client.api.v1.dto +import org.apache.kyuubi.client.api.v1.dto.{SessionData, _} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_CONNECTION_URL_KEY import
org.apache.kyuubi.engine.ShareLevel -import org.apache.kyuubi.events.KyuubiSessionEvent import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} import org.apache.kyuubi.operation.OperationHandle import org.apache.kyuubi.server.http.authentication.AuthenticationHandler.AUTHORIZATION_HEADER @@ -48,9 +48,7 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { } test("open/close and count session") { - val requestObj = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue").asJava) + val requestObj = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) var response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) @@ -81,9 +79,7 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { } test("getSessionList") { - val requestObj = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue").asJava) + val requestObj = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) var response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) @@ -109,9 +105,7 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { } test("get session event") { - val sessionOpenRequest = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue").asJava) + val sessionOpenRequest = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) val user = "kyuubi".getBytes() @@ -127,10 +121,10 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { // get session event var response = webTarget.path(s"api/v1/sessions/$sessionHandle").request().get() assert(200 == sessionOpenResp.getStatus) - val sessions = response.readEntity(classOf[KyuubiSessionEvent]) - assert(sessions.conf("testConfig").equals("testValue")) - assert(sessions.sessionType.equals(SessionType.INTERACTIVE.toString)) - assert(sessions.user.equals("kyuubi")) + val sessions = response.readEntity(classOf[dto.KyuubiSessionEvent]) + assert(sessions.getConf.get("testConfig").equals("testValue")) + assert(sessions.getSessionType.equals(SessionType.INTERACTIVE.toString)) + assert(sessions.getUser.equals("kyuubi")) // close an opened session response = webTarget.path(s"api/v1/sessions/$sessionHandle").request().delete() @@ -147,9 +141,9 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { val failedConnections = MetricsSystem.counterValue(MetricsConstants.REST_CONN_FAIL).getOrElse(0L) - val requestObj = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue", KyuubiConf.SERVER_INFO_PROVIDER.key -> "SERVER").asJava) + val requestObj = new SessionOpenRequest(Map( + "testConfig" -> "testValue", + KyuubiConf.SERVER_INFO_PROVIDER.key -> "SERVER").asJava) var response: Response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) @@ -188,9 +182,7 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { } test("submit operation and get operation handle") { - val requestObj = new SessionOpenRequest( - 1, - Map("testConfig" -> "testValue").asJava) + val requestObj = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) var response: Response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) @@ -200,7 +192,7 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { val pathPrefix = s"api/v1/sessions/$sessionHandle" - val statementReq = new StatementRequest("show tables", true, 3000) + var statementReq = new 
StatementRequest("show tables", true, 3000) response = webTarget .path(s"$pathPrefix/operations/statement").request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.entity(statementReq, MediaType.APPLICATION_JSON_TYPE)) @@ -208,6 +200,18 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { var operationHandle = response.readEntity(classOf[OperationHandle]) assert(operationHandle !== null) + statementReq = new StatementRequest( + "spark.sql(\"show tables\")", + true, + 3000, + Collections.singletonMap(KyuubiConf.OPERATION_LANGUAGE.key, "SCALA")) + response = webTarget + .path(s"$pathPrefix/operations/statement").request(MediaType.APPLICATION_JSON_TYPE) + .post(Entity.entity(statementReq, MediaType.APPLICATION_JSON_TYPE)) + assert(200 == response.getStatus) + operationHandle = response.readEntity(classOf[OperationHandle]) + assert(operationHandle !== null) + response = webTarget.path(s"$pathPrefix/operations/typeInfo").request() .post(Entity.entity(null, MediaType.APPLICATION_JSON_TYPE)) assert(200 == response.getStatus) @@ -280,11 +284,9 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { } test("post session exception if failed to open engine session") { - val requestObj = new SessionOpenRequest( - 1, - Map( - "spark.master" -> "invalid", - KyuubiConf.ENGINE_SHARE_LEVEL.key -> ShareLevel.CONNECTION.toString).asJava) + val requestObj = new SessionOpenRequest(Map( + "spark.master" -> "invalid", + KyuubiConf.ENGINE_SHARE_LEVEL.key -> ShareLevel.CONNECTION.toString).asJava) var response = webTarget.path("api/v1/sessions") .request(MediaType.APPLICATION_JSON_TYPE) @@ -299,4 +301,89 @@ class SessionsResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper { assert(sessionEvent.contains("The last 10 line(s) of log are:")) } } + + test("fix kyuubi session leak caused by engine stop") { + // clean up all sessions + var response = webTarget.path("api/v1/sessions").request().get() + val sessionDataList = response.readEntity(new GenericType[List[SessionData]]() {}) + sessionDataList.foreach(sessionData => { + response = webTarget.path(s"api/v1/sessions/${sessionData.getIdentifier}") + .request().delete() + assert(200 == response.getStatus) + }) + + // open a session + val requestObj = new SessionOpenRequest(Map( + KyuubiConf.ENGINE_ALIVE_PROBE_ENABLED.key -> "true", + KyuubiConf.ENGINE_ALIVE_PROBE_INTERVAL.key -> "5000", + KyuubiConf.ENGINE_ALIVE_TIMEOUT.key -> "3000").asJava) + response = webTarget.path("api/v1/sessions") + .request(MediaType.APPLICATION_JSON_TYPE) + .post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE)) + val sessionHandle = response.readEntity(classOf[SessionHandle]).getIdentifier + val pathPrefix = s"api/v1/sessions/$sessionHandle" + + response = webTarget.path("api/v1/sessions/count").request().get() + val openedSessionCount = response.readEntity(classOf[SessionOpenCount]) + assert(openedSessionCount.getOpenSessionCount == 1) + + var statementReq = new StatementRequest( + "spark.sql(\"show tables\")", + true, + 3000, + Collections.singletonMap(KyuubiConf.OPERATION_LANGUAGE.key, "SCALA")) + response = webTarget + .path(s"$pathPrefix/operations/statement").request(MediaType.APPLICATION_JSON_TYPE) + .post(Entity.entity(statementReq, MediaType.APPLICATION_JSON_TYPE)) + assert(200 == response.getStatus) + var operationHandle = response.readEntity(classOf[OperationHandle]) + assert(operationHandle !== null) + assert(openedSessionCount.getOpenSessionCount == 1) + + statementReq = new StatementRequest( + 
"spark.close()", + true, + 3000, + Collections.singletonMap(KyuubiConf.OPERATION_LANGUAGE.key, "SCALA")) + response = webTarget + .path(s"$pathPrefix/operations/statement").request(MediaType.APPLICATION_JSON_TYPE) + .post(Entity.entity(statementReq, MediaType.APPLICATION_JSON_TYPE)) + assert(200 == response.getStatus) + operationHandle = response.readEntity(classOf[OperationHandle]) + assert(operationHandle !== null) + + // Because the engine has stopped (due to spark.close), the Spark session is closed. + // Therefore, the Kyuubi session count should be 0. + eventually(timeout(30.seconds), interval(1000.milliseconds)) { + var response = webTarget.path("api/v1/sessions/count").request().get() + val openedSessionCount = response.readEntity(classOf[SessionOpenCount]) + assert(openedSessionCount.getOpenSessionCount == 0) + + response = webTarget.path("api/v1/sessions").request().get() + val sessionDataList = response.readEntity(new GenericType[List[SessionData]]() {}) + assert(sessionDataList.isEmpty) + } + } + + test("list all type operations under session") { + val sessionOpenRequest = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) + val user = "kyuubi".getBytes() + val sessionOpenResp = webTarget.path("api/v1/sessions") + .request(MediaType.APPLICATION_JSON_TYPE) + .header( + AUTHORIZATION_HEADER, + s"Basic ${new String(Base64.getEncoder.encode(user), StandardCharsets.UTF_8)}") + .post(Entity.entity(sessionOpenRequest, MediaType.APPLICATION_JSON_TYPE)) + + val sessionHandle = sessionOpenResp.readEntity(classOf[SessionHandle]).getIdentifier + + // get operations belongs to specified session + val response = webTarget + .path(s"api/v1/sessions/${sessionHandle.toString}/operations") + .request().get() + assert(200 == response.getStatus) + val operations = response.readEntity(new GenericType[Seq[OperationData]]() {}) + assert(operations.size == 1) + assert(sessionHandle.toString.equals(operations.head.getSessionId)) + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilterSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilterSuite.scala index 9a79d7922b5..de4b056ff46 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilterSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/http/authentication/AuthenticationFilterSuite.scala @@ -25,7 +25,7 @@ class AuthenticationFilterSuite extends KyuubiFunSuite { test("add auth handler and destroy") { val filter = new AuthenticationFilter(KyuubiConf()) filter.addAuthHandler(new BasicAuthenticationHandler(null)) - assert(filter.authSchemeHandlers.size == 0) + assert(filter.authSchemeHandlers.isEmpty) filter.addAuthHandler(new BasicAuthenticationHandler(AuthTypes.LDAP)) assert(filter.authSchemeHandlers.size == 1) filter.addAuthHandler(new BasicAuthenticationHandler(AuthTypes.LDAP)) diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/MetadataManagerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/MetadataManagerSuite.scala index 75c935a3de2..564b5ebe939 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/MetadataManagerSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/MetadataManagerSuite.scala @@ -28,7 +28,7 @@ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.config.KyuubiConf._ import org.apache.kyuubi.metrics.{MetricsConstants, 
MetricsSystem} import org.apache.kyuubi.metrics.MetricsConstants._ -import org.apache.kyuubi.server.metadata.api.Metadata +import org.apache.kyuubi.server.metadata.api.{Metadata, MetadataFilter} import org.apache.kyuubi.session.SessionType class MetadataManagerSuite extends KyuubiFunSuite { @@ -66,7 +66,7 @@ class MetadataManagerSuite extends KyuubiFunSuite { retryRef.addRetryingMetadataRequest(UpdateMetadata(metadataToUpdate)) eventually(timeout(3.seconds)) { assert(retryRef.hasRemainingRequests()) - assert(metadataManager.getBatch(metadata.identifier).getState === "PENDING") + assert(metadataManager.getBatch(metadata.identifier).map(_.getState).contains("PENDING")) } val metadata2 = metadata.copy(identifier = UUID.randomUUID().toString) @@ -84,7 +84,7 @@ class MetadataManagerSuite extends KyuubiFunSuite { eventually(timeout(3.seconds)) { assert(!retryRef2.hasRemainingRequests()) - assert(metadataManager.getBatch(metadata2.identifier).getState === "RUNNING") + assert(metadataManager.getBatch(metadata2.identifier).map(_.getState).contains("RUNNING")) } metadataManager.identifierRequestsAsyncRetryRefs.clear() @@ -116,7 +116,7 @@ class MetadataManagerSuite extends KyuubiFunSuite { MetricsSystem.meterValue(MetricsConstants.METADATA_REQUEST_RETRYING) .getOrElse(0L) - retryingRequests === 0) - val invalidMetadata = metadata.copy(kyuubiInstance = null) + val invalidMetadata = metadata.copy(state = null) intercept[Exception](metadataManager.insertMetadata(invalidMetadata, false)) assert( MetricsSystem.meterValue(MetricsConstants.METADATA_REQUEST_TOTAL) @@ -157,7 +157,7 @@ class MetadataManagerSuite extends KyuubiFunSuite { metadataManager.start() f(metadataManager) } finally { - metadataManager.getBatches(null, null, null, 0, 0, 0, Int.MaxValue).foreach { batch => + metadataManager.getBatches(MetadataFilter(), 0, Int.MaxValue).foreach { batch => metadataManager.cleanupMetadataById(batch.getId) } // ensure no metadata request leak diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreSuite.scala index aa53af3a908..2ee082a1d2b 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/metadata/jdbc/JDBCMetadataStoreSuite.scala @@ -30,7 +30,7 @@ import org.apache.kyuubi.session.SessionType class JDBCMetadataStoreSuite extends KyuubiFunSuite { private val conf = KyuubiConf() - .set(METADATA_STORE_JDBC_DATABASE_TYPE, DatabaseType.DERBY.toString) + .set(METADATA_STORE_JDBC_DATABASE_TYPE, DatabaseType.SQLITE.toString) .set(METADATA_STORE_JDBC_DATABASE_SCHEMA_INIT, true) .set(s"$METADATA_STORE_JDBC_DATASOURCE_PREFIX.connectionTimeout", "3000") .set(s"$METADATA_STORE_JDBC_DATASOURCE_PREFIX.maximumPoolSize", "99") @@ -39,11 +39,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { override def afterAll(): Unit = { super.afterAll() - jdbcMetadataStore.getMetadataList( - MetadataFilter(), - 0, - Int.MaxValue, - true).foreach { + jdbcMetadataStore.getMetadataList(MetadataFilter(), 0, Int.MaxValue).foreach { batch => jdbcMetadataStore.cleanupMetadataByIdentifier(batch.identifier) } @@ -82,28 +78,18 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { engineType = "spark", clusterManager = Some("local")) - var batchStateOnlyMetadata = batchMetadata.copy( - resource = null, - className = null, - requestConf = Map.empty, - requestArgs = Seq.empty) - 
jdbcMetadataStore.insertMetadata(batchMetadata) - assert(jdbcMetadataStore.getMetadata(batchId, true) != batchStateOnlyMetadata) - assert(jdbcMetadataStore.getMetadata(batchId, false) != batchMetadata) // the engine type is formatted with UPPER batchMetadata = batchMetadata.copy(engineType = "SPARK") - batchStateOnlyMetadata = batchStateOnlyMetadata.copy(engineType = "SPARK") - assert(jdbcMetadataStore.getMetadata(batchId, true) == batchStateOnlyMetadata) - assert(jdbcMetadataStore.getMetadata(batchId, false) == batchMetadata) + assert(jdbcMetadataStore.getMetadata(batchId) == batchMetadata) jdbcMetadataStore.cleanupMetadataByIdentifier(batchId) - assert(jdbcMetadataStore.getMetadata(batchId, true) == null) + assert(jdbcMetadataStore.getMetadata(batchId) == null) jdbcMetadataStore.insertMetadata(batchMetadata) - val batchState2 = batchStateOnlyMetadata.copy(identifier = UUID.randomUUID().toString) + val batchState2 = batchMetadata.copy(identifier = UUID.randomUUID().toString) jdbcMetadataStore.insertMetadata(batchState2) var batches = @@ -112,9 +98,8 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { sessionType = SessionType.BATCH, engineType = "Spark"), 0, - 1, - true) - assert(batches == Seq(batchStateOnlyMetadata)) + 1) + assert(batches == Seq(batchMetadata)) batches = jdbcMetadataStore.getMetadataList( MetadataFilter( @@ -122,9 +107,8 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { engineType = "Spark", username = "kyuubi"), 0, - Int.MaxValue, - true) - assert(batches == Seq(batchStateOnlyMetadata, batchState2)) + Int.MaxValue) + assert(batches == Seq(batchMetadata, batchState2)) jdbcMetadataStore.cleanupMetadataByIdentifier(batchState2.identifier) @@ -135,8 +119,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { username = "kyuubi", state = "PENDING"), 0, - Int.MaxValue, - true) + Int.MaxValue) assert(batches.isEmpty) batches = jdbcMetadataStore.getMetadataList( @@ -146,9 +129,8 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { username = "kyuubi", state = "PENDING"), 0, - Int.MaxValue, - true) - assert(batches == Seq(batchStateOnlyMetadata)) + Int.MaxValue) + assert(batches == Seq(batchMetadata)) batches = jdbcMetadataStore.getMetadataList( MetadataFilter( @@ -157,8 +139,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { username = "kyuubi", state = "RUNNING"), 0, - Int.MaxValue, - true) + Int.MaxValue) assert(batches.isEmpty) batches = jdbcMetadataStore.getMetadataList( @@ -168,8 +149,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { username = "no_kyuubi", state = "PENDING"), 0, - Int.MaxValue, - true) + Int.MaxValue) assert(batches.isEmpty) batches = jdbcMetadataStore.getMetadataList( @@ -178,31 +158,27 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { engineType = "SPARK", state = "PENDING"), 0, - Int.MaxValue, - true) - assert(batches == Seq(batchStateOnlyMetadata)) + Int.MaxValue) + assert(batches == Seq(batchMetadata)) batches = jdbcMetadataStore.getMetadataList( MetadataFilter(sessionType = SessionType.BATCH), 0, - Int.MaxValue, - true) - assert(batches == Seq(batchStateOnlyMetadata)) + Int.MaxValue) + assert(batches == Seq(batchMetadata)) batches = jdbcMetadataStore.getMetadataList( MetadataFilter( sessionType = SessionType.BATCH, peerInstanceClosed = true), 0, - Int.MaxValue, - true) + Int.MaxValue) assert(batches.isEmpty) jdbcMetadataStore.updateMetadata(Metadata( - identifier = batchStateOnlyMetadata.identifier, + identifier = batchMetadata.identifier, peerInstanceClosed = true)) - batchStateOnlyMetadata = 
batchStateOnlyMetadata.copy(peerInstanceClosed = true) batchMetadata = batchMetadata.copy(peerInstanceClosed = true) batches = jdbcMetadataStore.getMetadataList( @@ -210,9 +186,8 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { sessionType = SessionType.BATCH, peerInstanceClosed = true), 0, - Int.MaxValue, - true) - assert(batches === Seq(batchStateOnlyMetadata)) + Int.MaxValue) + assert(batches === Seq(batchMetadata)) var batchesToRecover = jdbcMetadataStore.getMetadataList( MetadataFilter( @@ -220,8 +195,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { state = "PENDING", kyuubiInstance = kyuubiInstance), 0, - Int.MaxValue, - false) + Int.MaxValue) assert(batchesToRecover == Seq(batchMetadata)) batchesToRecover = jdbcMetadataStore.getMetadataList( @@ -230,11 +204,10 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { state = "RUNNING", kyuubiInstance = kyuubiInstance), 0, - Int.MaxValue, - false) + Int.MaxValue) assert(batchesToRecover.isEmpty) - var newBatchState = batchStateOnlyMetadata.copy( + var newBatchState = batchMetadata.copy( state = "RUNNING", engineId = "app_id", engineName = "app_name", @@ -242,12 +215,12 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { engineState = "RUNNING", engineError = None) jdbcMetadataStore.updateMetadata(newBatchState) - assert(jdbcMetadataStore.getMetadata(batchId, true) == newBatchState) + assert(jdbcMetadataStore.getMetadata(batchId) == newBatchState) newBatchState = newBatchState.copy(state = "FINISHED", endTime = System.currentTimeMillis()) jdbcMetadataStore.updateMetadata(newBatchState) - assert(jdbcMetadataStore.getMetadata(batchId, true) == newBatchState) + assert(jdbcMetadataStore.getMetadata(batchId) == newBatchState) assert(jdbcMetadataStore.getMetadataList( MetadataFilter( @@ -255,8 +228,7 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { state = "PENDING", kyuubiInstance = kyuubiInstance), 0, - Int.MaxValue, - false).isEmpty) + Int.MaxValue).isEmpty) assert(jdbcMetadataStore.getMetadataList( MetadataFilter( @@ -264,12 +236,11 @@ class JDBCMetadataStoreSuite extends KyuubiFunSuite { state = "RUNNING", kyuubiInstance = kyuubiInstance), 0, - Int.MaxValue, - false).isEmpty) + Int.MaxValue).isEmpty) eventually(Timeout(3.seconds)) { jdbcMetadataStore.cleanupMetadataByAge(1000) - assert(jdbcMetadataStore.getMetadata(batchId, true) == null) + assert(jdbcMetadataStore.getMetadata(batchId) == null) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminCtlSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminCtlSuite.scala index f7cbb20016c..32bb6fbb152 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminCtlSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminCtlSuite.scala @@ -19,13 +19,17 @@ package org.apache.kyuubi.server.rest.client import java.util.UUID +import org.mockito.Mockito.lenient +import org.scalatestplus.mockito.MockitoSugar.mock + import org.apache.kyuubi.{KYUUBI_VERSION, RestClientTestHelper} import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ctl.{CtlConf, TestPrematureExit} import org.apache.kyuubi.engine.EngineRef import org.apache.kyuubi.ha.HighAvailabilityConf +import org.apache.kyuubi.ha.client.{DiscoveryPaths, ServiceDiscovery} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient -import org.apache.kyuubi.ha.client.DiscoveryPaths +import org.apache.kyuubi.plugin.PluginLoader class AdminCtlSuite extends 
RestClientTestHelper with TestPrematureExit { override def beforeAll(): Unit = { @@ -52,9 +56,11 @@ class AdminCtlSuite extends RestClientTestHelper with TestPrematureExit { val id = UUID.randomUUID().toString conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) - conf.set(KyuubiConf.AUTHENTICATION_METHOD, Seq("LDAP", "CUSTOM")) + conf.set(KyuubiConf.AUTHENTICATION_METHOD, Set("LDAP", "CUSTOM")) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") + val user = ldapUser - val engine = new EngineRef(conf.clone, user, "grp", id, null) + val engine = new EngineRef(conf.clone, user, PluginLoader.loadGroupProvider(conf), id, null) val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_USER_SPARK_SQL", @@ -99,4 +105,17 @@ class AdminCtlSuite extends RestClientTestHelper with TestPrematureExit { args, "Engine Node List (total 0)") } + + test("list server") { + // Mock Kyuubi Server + val serverDiscovery = mock[ServiceDiscovery] + lenient.when(serverDiscovery.fe).thenReturn(fe) + val namespace = conf.get(HighAvailabilityConf.HA_NAMESPACE) + withDiscoveryClient(conf) { client => + client.registerService(conf, namespace, serverDiscovery) + + val args = Array("list", "server", "--authSchema", "spnego") + testPrematureExitForAdminControlCli(args, "Server Node List (total 1)") + } + } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminRestApiSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminRestApiSuite.scala index ab1a102026c..d63e4660772 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminRestApiSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/AdminRestApiSuite.scala @@ -21,13 +21,18 @@ import java.util.UUID import scala.collection.JavaConverters.asScalaBufferConverter +import org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2 +import org.mockito.Mockito.lenient +import org.scalatestplus.mockito.MockitoSugar.mock + import org.apache.kyuubi.{KYUUBI_VERSION, RestClientTestHelper} import org.apache.kyuubi.client.{AdminRestApi, KyuubiRestClient} import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys} import org.apache.kyuubi.engine.EngineRef import org.apache.kyuubi.ha.HighAvailabilityConf +import org.apache.kyuubi.ha.client.{DiscoveryPaths, ServiceDiscovery} import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient -import org.apache.kyuubi.ha.client.DiscoveryPaths +import org.apache.kyuubi.plugin.PluginLoader class AdminRestApiSuite extends RestClientTestHelper { test("refresh kyuubi server hadoop conf") { @@ -45,9 +50,10 @@ class AdminRestApiSuite extends RestClientTestHelper { val id = UUID.randomUUID().toString conf.set(HighAvailabilityConf.HA_NAMESPACE, "kyuubi_test") conf.set(KyuubiConf.ENGINE_IDLE_TIMEOUT, 180000L) - conf.set(KyuubiConf.AUTHENTICATION_METHOD, Seq("LDAP", "CUSTOM")) + conf.set(KyuubiConf.AUTHENTICATION_METHOD, Set("LDAP", "CUSTOM")) + conf.set(KyuubiConf.GROUP_PROVIDER, "hadoop") val user = ldapUser - val engine = new EngineRef(conf.clone, user, "grp", id, null) + val engine = new EngineRef(conf.clone, user, PluginLoader.loadGroupProvider(conf), id, null) val engineSpace = DiscoveryPaths.makePath( s"kyuubi_test_${KYUUBI_VERSION}_USER_SPARK_SQL", @@ -68,7 +74,7 @@ class AdminRestApiSuite extends RestClientTestHelper { .build() val adminRestApi = new AdminRestApi(basicKyuubiRestClient) - var engines = 
adminRestApi.listEngines("spark_sql", "user", "default", "").asScala + var engines = adminRestApi.listEngines("spark_sql", "user", "default", "", "false").asScala assert(engines.size == 1) assert(engines(0).getUser == user) assert(engines(0).getVersion == KYUUBI_VERSION) @@ -81,7 +87,88 @@ class AdminRestApiSuite extends RestClientTestHelper { val result = adminRestApi.deleteEngine("spark_sql", "user", "default", "") assert(result == s"Engine ${engineSpace} is deleted successfully.") - engines = adminRestApi.listEngines("spark_sql", "user", "default", "").asScala - assert(engines.size == 0) + engines = adminRestApi.listEngines("spark_sql", "user", "default", "", "false").asScala + assert(engines.isEmpty) + } + + test("list/close session") { + fe.be.sessionManager.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "admin", + "123456", + "localhost", + Map("testConfig" -> "testValue")) + + val spnegoKyuubiRestClient: KyuubiRestClient = + KyuubiRestClient.builder(baseUri.toString) + .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.SPNEGO) + .spnegoHost("localhost") + .build() + val adminRestApi = new AdminRestApi(spnegoKyuubiRestClient) + + // list sessions + var sessions = adminRestApi.listSessions().asScala + assert(sessions.nonEmpty) + assert(sessions.head.getUser == "admin") + + // close session + val response = adminRestApi.closeSession(sessions.head.getIdentifier) + assert(response.contains("success")) + + // list again + sessions = adminRestApi.listSessions().asScala + assert(sessions.isEmpty) + } + + test("list/close operation") { + val sessionHandle = fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "admin", + "123456", + "localhost", + Map("testConfig" -> "testValue")) + val operation = fe.be.getCatalogs(sessionHandle) + + val spnegoKyuubiRestClient: KyuubiRestClient = + KyuubiRestClient.builder(baseUri.toString) + .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.SPNEGO) + .spnegoHost("localhost") + .build() + val adminRestApi = new AdminRestApi(spnegoKyuubiRestClient) + + // list operations + var operations = adminRestApi.listOperations().asScala + assert(operations.nonEmpty) + assert(operations.map(op => op.getIdentifier).contains(operation.identifier.toString)) + + // close operation + val response = adminRestApi.closeOperation(operation.identifier.toString) + assert(response.contains("success")) + + // list again + operations = adminRestApi.listOperations().asScala + assert(!operations.map(op => op.getIdentifier).contains(operation.identifier.toString)) + + } + + test("list server") { + val spnegoKyuubiRestClient: KyuubiRestClient = + KyuubiRestClient.builder(baseUri.toString) + .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.SPNEGO) + .spnegoHost("localhost") + .build() + val adminRestApi = new AdminRestApi(spnegoKyuubiRestClient) + + // Mock Kyuubi Server + val serverDiscovery = mock[ServiceDiscovery] + lenient.when(serverDiscovery.fe).thenReturn(fe) + val namespace = conf.get(HighAvailabilityConf.HA_NAMESPACE) + withDiscoveryClient(conf) { client => + client.registerService(conf, namespace, serverDiscovery) + + val servers = adminRestApi.listServers().asScala + assert(servers.nonEmpty) + assert(servers.map(s => s.getInstance()).contains(server.frontendServices.last.connectionUrl)) + } } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchCliSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchCliSuite.scala index ff807ef027b..bcf8c450eb8 100644 --- 
a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchCliSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchCliSuite.scala @@ -32,13 +32,16 @@ import org.apache.kyuubi.{BatchTestHelper, RestClientTestHelper, Utils} import org.apache.kyuubi.client.util.BatchUtils._ import org.apache.kyuubi.config.KyuubiConf import org.apache.kyuubi.ctl.{CtlConf, TestPrematureExit} +import org.apache.kyuubi.engine.ApplicationManagerInfo import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem} +import org.apache.kyuubi.server.metadata.api.MetadataFilter import org.apache.kyuubi.session.KyuubiSessionManager class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with BatchTestHelper { val basePath: String = Utils.getCodeSourceLocation(getClass) val batchFile: String = s"${basePath}/batch.yaml" + val longTimeBatchFile: String = s"${basePath}/batch_long_time.yaml" override protected val otherConfigs: Map[String, String] = { Map(KyuubiConf.BATCH_APPLICATION_CHECK_INTERVAL.key -> "100") @@ -71,6 +74,27 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat |options: | verbose: true""".stripMargin Files.write(Paths.get(batchFile), batch_basic.getBytes(StandardCharsets.UTF_8)) + + val long_time_batch_basic = s"""apiVersion: v1 + |username: ${ldapUser} + |request: + | batchType: Spark + | name: LongTimeBatch + | resource: ${sparkBatchTestResource.get} + | className: org.apache.spark.examples.DriverSubmissionTest + | args: + | - 10 + | configs: + | spark.master: local + | wait.completion: true + | k1: v1 + | 1: test_integer_key + | key: + |options: + | verbose: true""".stripMargin + Files.write( + Paths.get(longTimeBatchFile), + long_time_batch_basic.getBytes(StandardCharsets.UTF_8)) } override def afterEach(): Unit = { @@ -78,10 +102,9 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat sessionManager.allSessions().foreach { session => sessionManager.closeSession(session.handle) } - sessionManager.getBatchesFromMetadataStore(null, null, null, 0, 0, 0, Int.MaxValue).foreach { - batch => - sessionManager.applicationManager.killApplication(None, batch.getId) - sessionManager.cleanupMetadata(batch.getId) + sessionManager.getBatchesFromMetadataStore(MetadataFilter(), 0, Int.MaxValue).foreach { batch => + sessionManager.applicationManager.killApplication(ApplicationManagerInfo(None), batch.getId) + sessionManager.cleanupMetadata(batch.getId) } } @@ -93,7 +116,7 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat "create", "batch", "-f", - batchFile, + longTimeBatchFile, "--password", ldapUserPasswd) var result = testPrematureExitForControlCli(createArgs, "") @@ -109,9 +132,15 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat ldapUser, "--password", ldapUserPasswd) - result = testPrematureExitForControlCli(getArgs, "SPARK") - assert(result.contains("SPARK")) - assert(result.contains(s"${fe.connectionUrl}")) + var invalidCount = 0 + eventually(timeout(5.seconds), interval(100.milliseconds)) { + invalidCount += 1 + result = testPrematureExitForControlCli(getArgs, "SPARK") + assert(result.contains("RUNNING")) + assert(result.contains("SPARK")) + assert(result.contains(s"${fe.connectionUrl}")) + invalidCount -= 1 + } val logArgs = Array( "log", @@ -139,7 +168,7 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat eventually(timeout(3.seconds), interval(200.milliseconds)) { 
assert(MetricsSystem.counterValue( - MetricsConstants.REST_CONN_TOTAL).getOrElse(0L) - totalConnections === 5) + MetricsConstants.REST_CONN_TOTAL).getOrElse(0L) - totalConnections - invalidCount === 5) assert(MetricsSystem.counterValue(MetricsConstants.REST_CONN_OPEN).getOrElse(0L) === 0) } } @@ -151,7 +180,7 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat "create", "batch", "-f", - batchFile, + longTimeBatchFile, "--authSchema", "SPNEGO") var result = testPrematureExitForControlCli(createArgs, "") @@ -165,9 +194,12 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat batchId, "--authSchema", "spnego") - result = testPrematureExitForControlCli(getArgs, "SPARK") - assert(result.contains("SPARK")) - assert(result.contains(s"${fe.connectionUrl}")) + eventually(timeout(5.seconds), interval(100.milliseconds)) { + result = testPrematureExitForControlCli(getArgs, "SPARK") + assert(result.contains("RUNNING")) + assert(result.contains("SPARK")) + assert(result.contains(s"${fe.connectionUrl}")) + } val logArgs = Array( "log", @@ -258,12 +290,12 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", "", "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) sessionManager.openSession( TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V11, "", @@ -280,22 +312,22 @@ class BatchCliSuite extends RestClientTestHelper with TestPrematureExit with Bat "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", "", "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) sessionManager.openBatchSession( "kyuubi", "kyuubi", InetAddress.getLocalHost.getCanonicalHostName, - Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString), newBatchRequest( "spark", "", "", - "")) + "", + Map(KYUUBI_BATCH_ID_KEY -> UUID.randomUUID().toString))) val listArgs = Array( "list", diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchRestApiSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchRestApiSuite.scala index cb7905286f9..d04826a9d20 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchRestApiSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/BatchRestApiSuite.scala @@ -68,8 +68,10 @@ class BatchRestApiSuite extends RestClientTestHelper with BatchTestHelper { assert(batch.getBatchType === "SPARK") // get batch log - val log = batchRestApi.getBatchLocalLog(batch.getId(), 0, 1) - assert(log.getRowCount == 1) + eventually(timeout(1.minutes)) { + val log = batchRestApi.getBatchLocalLog(batch.getId(), 0, 1) + assert(log.getRowCount == 1) + } // delete batch val closeResp = batchRestApi.deleteBatch(batch.getId(), null) @@ -162,8 +164,10 @@ class BatchRestApiSuite extends RestClientTestHelper with BatchTestHelper { assert(batch.getBatchType === "SPARK") // get batch log - val log = batchRestApi.getBatchLocalLog(batch.getId(), 0, 1) - assert(log.getRowCount == 1) + eventually(timeout(1.minutes)) { + val log = batchRestApi.getBatchLocalLog(batch.getId(), 0, 1) + assert(log.getRowCount == 1) + } // delete batch val closeResp = batchRestApi.deleteBatch(batch.getId(), proxyUser) diff --git 
a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/OperationRestApiSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/OperationRestApiSuite.scala new file mode 100644 index 00000000000..fed685c4478 --- /dev/null +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/OperationRestApiSuite.scala @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kyuubi.server.rest.client + +import scala.collection.JavaConverters._ + +import org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2 +import org.scalatest.concurrent.PatienceConfiguration.Timeout +import org.scalatest.time.SpanSugar.convertIntToGrainOfTime + +import org.apache.kyuubi.RestClientTestHelper +import org.apache.kyuubi.client.{KyuubiRestClient, OperationRestApi} +import org.apache.kyuubi.client.api.v1.dto.OpActionRequest +import org.apache.kyuubi.client.exception.KyuubiRestException +import org.apache.kyuubi.operation.OperationState + +class OperationRestApiSuite extends RestClientTestHelper { + + test("get an operation event") { + val statementHandleStr = getOpHandleStr() + + withOperationRestApi { operationRestApi => + val kyuubiEvent = operationRestApi.getOperationEvent(statementHandleStr) + assert("show tables".equals(kyuubiEvent.getStatement)) + assert(kyuubiEvent.isShouldRunAsync == true) + } + } + + test("apply operation action") { + val statementHandleStr = getOpHandleStr( + "SELECT java_method('java.lang.Thread', 'sleep', 10000l)") + + withOperationRestApi { operationRestApi => + // successful request + operationRestApi.applyOperationAction(new OpActionRequest("cancel"), statementHandleStr) + eventually(Timeout(5.seconds)) { + val kyuubiEvent = operationRestApi.getOperationEvent(statementHandleStr) + assert(kyuubiEvent.getState === OperationState.CANCELED.name) + } + + operationRestApi.applyOperationAction(new OpActionRequest("close"), statementHandleStr) + // failed request + assertThrows[KyuubiRestException] { + operationRestApi.applyOperationAction(new OpActionRequest("close"), statementHandleStr) + } + + // invalid operation + assertThrows[KyuubiRestException] { + operationRestApi.applyOperationAction(new OpActionRequest("fake"), statementHandleStr) + } + } + } + + test("get result set metadata/get operation log/get result row set") { + val statementHandleStr = getOpHandleStr("select \"test_value\", 1, 0.32d, true") + + withOperationRestApi { operationRestApi => + // wait for complete + eventually(Timeout(5.seconds)) { + val kyuubiEvent = operationRestApi.getOperationEvent(statementHandleStr) + assert(kyuubiEvent.getState === OperationState.FINISHED.name) + } + + val resultSetMetadata = operationRestApi.getResultSetMetadata(statementHandleStr) + 
assert(resultSetMetadata.getColumns.size == 4) + assert(resultSetMetadata.getColumns.get(0).getColumnName.equals("test_value")) + + val logRowSet = operationRestApi.getOperationLog(statementHandleStr, 10) + assert(logRowSet.getLogRowSet.asScala.exists( + _.contains("select \"test_value\", 1, 0.32d, true"))) + assert(logRowSet.getRowCount === 10) + + val resultRowSet = operationRestApi.getNextRowSet(statementHandleStr) + assert("test_value".equals(resultRowSet.getRows.asScala.head.getFields.asScala.head.getValue)) + assert(resultRowSet.getRowCount == 1) + } + } + + def withOperationRestApi[T](f: OperationRestApi => T): T = { + val basicKyuubiRestClient: KyuubiRestClient = + KyuubiRestClient.builder(baseUri.toString) + .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.BASIC) + .username(ldapUser) + .password(ldapUserPasswd) + .socketTimeout(30000) + .build() + val operationRestApi = new OperationRestApi(basicKyuubiRestClient) + f(operationRestApi) + } + + def getOpHandleStr(statement: String = "show tables"): String = { + val sessionHandle = fe.be.openSession( + HIVE_CLI_SERVICE_PROTOCOL_V2, + "admin", + "123456", + "localhost", + Map("testConfig" -> "testValue")) + + val op = + if (statement.nonEmpty) { + fe.be.executeStatement(sessionHandle, statement, Map.empty, runAsync = true, 3000) + } else { + fe.be.getCatalogs(sessionHandle) + } + + op.identifier.toString + } +} diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/SessionRestApiSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/SessionRestApiSuite.scala index 1edfb5e5393..a1dfd243229 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/SessionRestApiSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/rest/client/SessionRestApiSuite.scala @@ -17,21 +17,186 @@ package org.apache.kyuubi.server.rest.client -import scala.collection.JavaConverters.asScalaBufferConverter +import java.util +import java.util.Collections -import org.apache.hive.service.rpc.thrift.TProtocolVersion +import scala.collection.JavaConverters._ +import scala.concurrent.duration.DurationInt + +import org.apache.hive.service.rpc.thrift.TGetInfoType +import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.apache.kyuubi.RestClientTestHelper import org.apache.kyuubi.client.{KyuubiRestClient, SessionRestApi} +import org.apache.kyuubi.client.api.v1.dto._ +import org.apache.kyuubi.client.exception.KyuubiRestException +import org.apache.kyuubi.config.KyuubiConf +import org.apache.kyuubi.session.SessionType class SessionRestApiSuite extends RestClientTestHelper { - test("list session") { - fe.be.sessionManager.openSession( - TProtocolVersion.findByValue(1), - "admin", - "123456", - "localhost", - Map("testConfig" -> "testValue")) + test("get/close/list/count session") { + withSessionRestApi { sessionRestApi => + { + // open session + val sessionOpenRequest = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) + sessionRestApi.openSession(sessionOpenRequest) + + // list sessions + var sessions = sessionRestApi.listSessions().asScala + assert(sessions.size == 1) + val sessionHandle = sessions(0).getIdentifier + + // get open session count + var sessionCount = sessionRestApi.getOpenSessionCount + assert(sessionCount == 1) + + // close session + sessionRestApi.closeSession(sessionHandle) + + // list sessions again + sessions = sessionRestApi.listSessions().asScala + assert(sessions.isEmpty) + + // get open session count again + sessionCount = 
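
For readers less familiar with the Java client that the new OperationRestApiSuite exercises, the calls it covers compose roughly as below. The sketch reuses only the builder and methods visible in the suite; the endpoint, credentials, and operation handle are placeholders to substitute for your own deployment:

```scala
import org.apache.kyuubi.client.{KyuubiRestClient, OperationRestApi}
import org.apache.kyuubi.client.api.v1.dto.OpActionRequest

object OperationRestApiSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder endpoint/credentials; point these at a running Kyuubi REST frontend.
    val client = KyuubiRestClient.builder("http://localhost:10099")
      .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.BASIC)
      .username("ldap_user")
      .password("ldap_password")
      .socketTimeout(30000)
      .build()
    val operationApi = new OperationRestApi(client)

    // In practice the handle comes from SessionRestApi#executeStatement(...).getIdentifier.
    val operationId = "<operation-handle>"

    val event = operationApi.getOperationEvent(operationId) // statement text, state, ...
    println(s"state: ${event.getState}")

    val metadata = operationApi.getResultSetMetadata(operationId) // column names/types
    println(s"columns: ${metadata.getColumns.size}")

    val log = operationApi.getOperationLog(operationId, 10) // first 10 log rows
    println(s"log rows: ${log.getRowCount}")

    operationApi.applyOperationAction(new OpActionRequest("cancel"), operationId)
  }
}
```
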
sessionRestApi.getOpenSessionCount + assert(sessionCount == 0) + } + } + } + + test("get session event") { + withSessionRestApi { sessionRestApi => + // open session + val sessionOpenRequest = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) + val sessionHandle = sessionRestApi.openSession(sessionOpenRequest) + + // get session event + val kyuubiEvent = sessionRestApi.getSessionEvent( + sessionHandle.getIdentifier.toString) + assert(kyuubiEvent.getConf.get("testConfig").equals("testValue")) + assert(kyuubiEvent.getSessionType.equals(SessionType.INTERACTIVE.toString)) + } + } + + test("get info type") { + withSessionRestApi { sessionRestApi => + // open session + val sessionOpenRequest = new SessionOpenRequest( + Map("testConfig" -> "testValue", KyuubiConf.SERVER_INFO_PROVIDER.key -> "SERVER").asJava) + val sessionHandle = sessionRestApi.openSession(sessionOpenRequest) + + // get session info + val info = sessionRestApi.getSessionInfo( + sessionHandle.getIdentifier.toString, + TGetInfoType.CLI_SERVER_NAME.getValue) + assert(info.getInfoType.equals("CLI_SERVER_NAME")) + assert(info.getInfoValue.equals("Apache Kyuubi")) + } + } + + test("submit operation") { + withSessionRestApi { sessionRestApi => + // open session + val sessionOpenRequest = new SessionOpenRequest(Map("testConfig" -> "testValue").asJava) + val sessionHandle = sessionRestApi.openSession(sessionOpenRequest) + val sessionHandleStr = sessionHandle.getIdentifier.toString + + // execute statement + val op1 = sessionRestApi.executeStatement( + sessionHandleStr, + new StatementRequest("show tables", true, 3000)) + assert(op1.getIdentifier != null) + + // get type info + val op2 = sessionRestApi.getTypeInfo(sessionHandleStr) + assert(op2.getIdentifier != null) + + // get catalogs + val op3 = sessionRestApi.getCatalogs(sessionHandleStr) + assert(op3.getIdentifier != null) + + // get schemas + val op4 = sessionRestApi.getSchemas( + sessionHandleStr, + new GetSchemasRequest("spark_catalog", "default")) + assert(op4.getIdentifier != null) + + // get tables + val tableTypes = new util.ArrayList[String]() + val op5 = sessionRestApi.getTables( + sessionHandleStr, + new GetTablesRequest("spark_catalog", "default", "default", tableTypes)) + assert(op5.getIdentifier != null) + + // get table types + val op6 = sessionRestApi.getTableTypes(sessionHandleStr) + assert(op6.getIdentifier != null) + + // get columns + val op7 = sessionRestApi.getColumns( + sessionHandleStr, + new GetColumnsRequest("spark_catalog", "default", "default", "default")) + assert(op7.getIdentifier != null) + + // get function + val op8 = sessionRestApi.getFunctions( + sessionHandleStr, + new GetFunctionsRequest("default", "default", "default")) + assert(op8.getIdentifier != null) + + // get primary keys + assertThrows[KyuubiRestException] { + sessionRestApi.getPrimaryKeys( + sessionHandleStr, + new GetPrimaryKeysRequest("spark_catalog", "default", "default")) + } + + // get cross reference + val getCrossReferenceReq = new GetCrossReferenceRequest( + "spark_catalog", + "default", + "default", + "spark_catalog", + "default", + "default") + assertThrows[KyuubiRestException] { + sessionRestApi.getCrossReference(sessionHandleStr, getCrossReferenceReq) + } + } + } + + test("fix kyuubi session leak caused by engine stop") { + withSessionRestApi { sessionRestApi => + // close all sessions + val sessions = sessionRestApi.listSessions().asScala + sessions.foreach(session => sessionRestApi.closeSession(session.getIdentifier)) + + // open new session + val 
sessionOpenRequest = new SessionOpenRequest(Map( + KyuubiConf.ENGINE_ALIVE_PROBE_ENABLED.key -> "true", + KyuubiConf.ENGINE_ALIVE_PROBE_INTERVAL.key -> "5000", + KyuubiConf.ENGINE_ALIVE_TIMEOUT.key -> "3000").asJava) + val sessionHandle = sessionRestApi.openSession(sessionOpenRequest) + + // get open session count + val sessionCount = sessionRestApi.getOpenSessionCount + assert(sessionCount == 1) + + val statementReq = new StatementRequest( + "spark.stop()", + true, + 3000, + Collections.singletonMap(KyuubiConf.OPERATION_LANGUAGE.key, "SCALA")) + sessionRestApi.executeStatement(sessionHandle.getIdentifier.toString, statementReq) + + eventually(Timeout(30.seconds), interval(1.seconds)) { + assert(sessionRestApi.getOpenSessionCount == 0) + assert(sessionRestApi.listSessions().asScala.isEmpty) + } + } + } + + def withSessionRestApi[T](f: SessionRestApi => T): T = { val basicKyuubiRestClient: KyuubiRestClient = KyuubiRestClient.builder(baseUri.toString) .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.BASIC) @@ -39,12 +204,7 @@ class SessionRestApiSuite extends RestClientTestHelper { .password(ldapUserPasswd) .socketTimeout(30000) .build() - val sessionRestApi = new SessionRestApi(basicKyuubiRestClient) - val sessions = sessionRestApi.listSessions().asScala - assert(sessions.size == 1) - assert(sessions(0).getUser == "admin") - assert(sessions(0).getIpAddr == "localhost") - assert(sessions(0).getConf.toString == "{testConfig=testValue}") + f(sessionRestApi) } } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoClientApiSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoClientApiSuite.scala index 13e10a11277..478bf917463 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoClientApiSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoClientApiSuite.scala @@ -114,6 +114,7 @@ class TrinoClientApiSuite extends KyuubiFunSuite with TrinoRestFrontendTestHelpe (false, List[List[Any]]()) } } + Iterator.continually(getData(trino)).takeWhile(_._1).flatMap(_._2).toList } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoContextSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoContextSuite.scala index 87c8eda968a..6c5a01e4659 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoContextSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/TrinoContextSuite.scala @@ -84,7 +84,7 @@ class TrinoContextSuite extends KyuubiFunSuite with RestFrontendTestHelper { checkOpState(opHandleStr, FINISHED) val metadataResp = fe.be.getResultSetMetadata(opHandle) - val tRowSet = fe.be.fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 1000, false) + val tRowSet = fe.be.fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 1000, false).getResults val status = fe.be.getOperationStatus(opHandle, Some(0)) val uri = new URI("sfdsfsdfdsf") @@ -111,7 +111,7 @@ class TrinoContextSuite extends KyuubiFunSuite with RestFrontendTestHelper { checkOpState(opHandleStr, FINISHED) val metadataResp = fe.be.getResultSetMetadata(opHandle) - val tRowSet = fe.be.fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 1000, false) + val tRowSet = fe.be.fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 1000, false).getResults val status = fe.be.getOperationStatus(opHandle, Some(0)) val uri = new URI("sfdsfsdfdsf") diff --git 
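
The reworked SessionRestApiSuite above drives the whole session lifecycle over REST. Roughly the same flow from an external client's point of view, again using only the builder and methods the suite itself calls (endpoint, credentials, and the session conf are placeholders):

```scala
import scala.collection.JavaConverters._

import org.apache.kyuubi.client.{KyuubiRestClient, SessionRestApi}
import org.apache.kyuubi.client.api.v1.dto.{SessionOpenRequest, StatementRequest}

object SessionRestApiSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder endpoint/credentials; point these at a running Kyuubi REST frontend.
    val client = KyuubiRestClient.builder("http://localhost:10099")
      .authHeaderMethod(KyuubiRestClient.AuthHeaderMethod.BASIC)
      .username("ldap_user")
      .password("ldap_password")
      .build()
    val sessionApi = new SessionRestApi(client)

    // Open a session, submit a statement asynchronously, then clean up.
    val handle = sessionApi.openSession(
      new SessionOpenRequest(Map("testConfig" -> "testValue").asJava))
    val op = sessionApi.executeStatement(
      handle.getIdentifier.toString,
      new StatementRequest("show tables", true, 3000))
    println(s"submitted operation ${op.getIdentifier}")

    // Each element of listSessions describes one live session (user, ip, conf, ...).
    sessionApi.listSessions().asScala.foreach(s => println(s.getIdentifier))
    println(s"open sessions: ${sessionApi.getOpenSessionCount}")

    sessionApi.closeSession(handle.getIdentifier)
  }
}
```
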
a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/v1/StatementResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/v1/StatementResourceSuite.scala index 5740f6d38f6..1ace5861264 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/v1/StatementResourceSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/trino/api/v1/StatementResourceSuite.scala @@ -26,7 +26,6 @@ import io.trino.client.{QueryError, QueryResults} import io.trino.client.ProtocolHeaders.TRINO_HEADERS import org.apache.kyuubi.{KyuubiFunSuite, KyuubiSQLException, TrinoRestFrontendTestHelper} -import org.apache.kyuubi.operation.{OperationHandle, OperationState} import org.apache.kyuubi.server.trino.api.{Query, TrinoContext} import org.apache.kyuubi.server.trino.api.v1.dto.Ok import org.apache.kyuubi.session.SessionHandle @@ -52,9 +51,9 @@ class StatementResourceSuite extends KyuubiFunSuite with TrinoRestFrontendTestHe val trinoResponseIter = Iterator.iterate(TrinoResponse(response = Option(response)))(getData) val isErr = trinoResponseIter.takeWhile(_.isEnd == false).exists { t => - t.queryError != None && t.response == None + t.queryError.isDefined && t.response.isEmpty } - assert(isErr == true) + assert(isErr) } test("statement submit and get result") { @@ -62,16 +61,14 @@ class StatementResourceSuite extends KyuubiFunSuite with TrinoRestFrontendTestHe .request().post(Entity.entity("select 1", MediaType.TEXT_PLAIN_TYPE)) val trinoResponseIter = Iterator.iterate(TrinoResponse(response = Option(response)))(getData) - val dataSet = trinoResponseIter - .takeWhile(_.isEnd == false) - .map(_.data) - .flatten.toList + val dataSet = trinoResponseIter.takeWhile(_.isEnd == false).flatMap(_.data).toList assert(dataSet == List(List(1))) } test("query cancel") { val response = webTarget.path("v1/statement") .request().post(Entity.entity("select 1", MediaType.TEXT_PLAIN_TYPE)) + assert(response.getStatus == 200) val qr = response.readEntity(classOf[QueryResults]) val sessionManager = fe.be.sessionManager val sessionHandle = @@ -84,16 +81,13 @@ class StatementResourceSuite extends KyuubiFunSuite with TrinoRestFrontendTestHe case Array(_, value) => SessionHandle.fromUUID(TrinoContext.urlDecode(value)) }.get sessionManager.getSession(sessionHandle) - val operationHandle = OperationHandle(qr.getId) - val operation = sessionManager.operationManager.getOperation(operationHandle) - assert(response.getStatus == 200) + val path = qr.getNextUri.getPath val nextResponse = webTarget.path(path).request().header( TRINO_HEADERS.requestSession(), s"${Query.KYUUBI_SESSION_ID}=${TrinoContext.urlEncode(sessionHandle.identifier.toString)}") .delete() assert(nextResponse.getStatus == 204) - assert(operation.getStatus.state == OperationState.CLOSED) val exception = intercept[KyuubiSQLException](sessionManager.getSession(sessionHandle)) assert(exception.getMessage === s"Invalid $sessionHandle") } diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/session/SessionLimiterSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/session/SessionLimiterSuite.scala index a22b84d1c34..775239f9b09 100644 --- a/kyuubi-server/src/test/scala/org/apache/kyuubi/session/SessionLimiterSuite.scala +++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/session/SessionLimiterSuite.scala @@ -20,8 +20,10 @@ import java.util.concurrent.{CountDownLatch, Executors} import java.util.concurrent.atomic.LongAdder import scala.collection.JavaConverters._ +import scala.util.Random import 
org.apache.kyuubi.{KyuubiFunSuite, KyuubiSQLException} +import org.apache.kyuubi.util.ThreadUtils class SessionLimiterSuite extends KyuubiFunSuite { @@ -97,7 +99,7 @@ class SessionLimiterSuite extends KyuubiFunSuite { .foreach(c => assert(c.get() == 0)) } - test("test session limiter with user unlimitted list") { + test("test session limiter with user unlimited list") { val user = "user001" val ipAddress = "127.0.0.1" val userLimit = 30 @@ -117,4 +119,84 @@ class SessionLimiterSuite extends KyuubiFunSuite { limiter.asInstanceOf[SessionLimiterImpl].counters().asScala.values .foreach(c => assert(c.get() == 0)) } + + test("test session limiter with user deny list") { + val ipAddress = "127.0.0.1" + val userLimit = 100 + val ipAddressLimit = 100 + val userIpAddressLimit = 100 + val denyUsers = Set("user002", "user003") + val limiter = + SessionLimiter(userLimit, ipAddressLimit, userIpAddressLimit, Set.empty, denyUsers) + + for (i <- 0 until 50) { + val userIpAddress = UserIpAddress("user001", ipAddress) + limiter.increment(userIpAddress) + } + limiter.asInstanceOf[SessionLimiterImpl].counters().asScala.values + .foreach(c => assert(c.get() == 50)) + + for (i <- 0 until 50) { + val userIpAddress = UserIpAddress("user001", ipAddress) + limiter.decrement(userIpAddress) + } + limiter.asInstanceOf[SessionLimiterImpl].counters().asScala.values + .foreach(c => assert(c.get() == 0)) + + val caught = intercept[KyuubiSQLException] { + val userIpAddress = UserIpAddress("user002", ipAddress) + limiter.increment(userIpAddress) + } + + assert(caught.getMessage.equals( + "Connection denied because the user is in the deny user list. (user: user002)")) + } + + test("test refresh unlimited users and deny users") { + val random: Random = new Random() + val latch = new CountDownLatch(600) + val userLimit = 100 + val ipAddressLimit = 101 + val userIpAddressLimit = 102 + val limiter = + SessionLimiter(userLimit, ipAddressLimit, userIpAddressLimit, Set.empty, Set.empty) + val threadPool = ThreadUtils.newDaemonCachedThreadPool("test-refresh-config") + + def checkUserLimit(userIpAddress: UserIpAddress): Unit = { + for (i <- 0 until 200) { + threadPool.execute(() => { + try { + Thread.sleep(random.nextInt(200)) + limiter.increment(userIpAddress) + } catch { + case _: Throwable => + } finally { + Thread.sleep(random.nextInt(500)) + // finally call limiter#decrement method. 
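
The new SessionLimiterSuite cases pin down the deny-list contract: a connection from a denied user is rejected at `increment` time with a `KyuubiSQLException`, every other connection is counted and must be paired with a `decrement` on close, and both the unlimited and deny lists can be swapped at runtime. A compact sketch of that contract, using only the factory and methods visible in the test (these are server-internal classes, so the sketch assumes it compiles in the same `org.apache.kyuubi.session` package as the suite):

```scala
package org.apache.kyuubi.session

import org.apache.kyuubi.KyuubiSQLException

object SessionLimiterDenySketch {
  def main(args: Array[String]): Unit = {
    // userLimit / ipAddressLimit / userIpAddressLimit are generous here;
    // only the deny list matters for this sketch.
    val limiter = SessionLimiter(100, 100, 100, Set.empty, Set("blocked_user"))

    val allowed = UserIpAddress("alice", "127.0.0.1")
    limiter.increment(allowed) // counted against alice's quotas
    limiter.decrement(allowed) // must be paired when the session closes

    // A denied user fails up front.
    try limiter.increment(UserIpAddress("blocked_user", "127.0.0.1"))
    catch { case e: KyuubiSQLException => println(e.getMessage) }

    // Lists can be refreshed at runtime, e.g. after a config reload.
    SessionLimiter.resetDenyUsers(limiter, Set.empty)
    SessionLimiter.resetUnlimitedUsers(limiter, Set("alice"))
  }
}
```
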
+ limiter.decrement(userIpAddress) + latch.countDown() + } + }) + } + } + + checkUserLimit(UserIpAddress("user001", "127.0.0.1")) + checkUserLimit(UserIpAddress("user002", "127.0.0.2")) + checkUserLimit(UserIpAddress("user003", "127.0.0.3")) + + Thread.sleep(100) + // set unlimited users and deny users + SessionLimiter.resetUnlimitedUsers(limiter, Set("user001")) + SessionLimiter.resetDenyUsers(limiter, Set("user002")) + + Thread.sleep(300) + // unset unlimited users and deny users + SessionLimiter.resetUnlimitedUsers(limiter, Set.empty) + SessionLimiter.resetDenyUsers(limiter, Set.empty) + + latch.await() + threadPool.shutdown() + limiter.asInstanceOf[SessionLimiterImpl].counters().asScala.values + .foreach(c => assert(c.get() == 0)) + } } diff --git a/kyuubi-server/web-ui/.env.development b/kyuubi-server/web-ui/.env.development index d8297cf3624..d1d91dd384d 100644 --- a/kyuubi-server/web-ui/.env.development +++ b/kyuubi-server/web-ui/.env.development @@ -15,4 +15,4 @@ NODE_ENV=development -VITE_APP_DEV_WEB_URL='/' +VITE_APP_DEV_WEB_URL='http://0.0.0.0:10099/' diff --git a/kyuubi-server/web-ui/.eslintrc b/kyuubi-server/web-ui/.eslintrc index ebbf401995e..f2bff2cd6e3 100644 --- a/kyuubi-server/web-ui/.eslintrc +++ b/kyuubi-server/web-ui/.eslintrc @@ -69,6 +69,9 @@ "exports": "never", "functions": "never" }], + "prettier/prettier": ["error", { + "bracketSameLine": true + }], "vue/multi-word-component-names": "off", "vue/component-definition-name-casing": "off", "vue/require-valid-default-prop": "off", diff --git a/kyuubi-server/web-ui/.prettierrc b/kyuubi-server/web-ui/.prettierrc index 1fceefb9885..01db7f49bc1 100644 --- a/kyuubi-server/web-ui/.prettierrc +++ b/kyuubi-server/web-ui/.prettierrc @@ -4,7 +4,7 @@ "vueIndentScriptAndStyle": true, "singleQuote": true, "quoteProps": "as-needed", - "jsxBracketSameLine": false, + "bracketSameLine": true, "jsxSingleQuote": true, "arrowParens": "always", "htmlWhitespaceSensitivity": "strict", diff --git a/kyuubi-server/web-ui/README.md b/kyuubi-server/web-ui/README.md index f93373414f8..abac83e9f05 100644 --- a/kyuubi-server/web-ui/README.md +++ b/kyuubi-server/web-ui/README.md @@ -15,7 +15,15 @@ npm install ### Development Project -To do this you can change the VITE_APP_DEV_WEB_URL parameter variable as the service url in `.env.development` in the project root directory, such as http://127.0.0.1:8090 +Notice: + +Before you start the Web UI project, please make sure the Kyuubi server has been started. + +Kyuubi Web UI will proxy the requests to Kyuubi server, with the default endpoint path to`http://localhost:10099`. Modify `VITE_APP_DEV_WEB_URL` in `.env.development` for customizing targeted endpoint path. + +#### Why proxy to http://localhost:10099 + +Currently kyuubi server binds on `http://0.0.0.0:10099` in case your are running kyuubi server in MacOS or Windows(If in linux, you should config kyuubi server `kyuubi.frontend.rest.bind.host=0.0.0.0`, or change `VITE_APP_DEV_WEB_URL` in `.env.development`). ```shell npm run dev @@ -37,7 +45,7 @@ npm run prettier ### Recommend -If you want to save disk space and boost installation speed, we recommend using `pnpm 7.x.x` to instead of npm. +If you want to save disk space and boost installation speed, we recommend using `pnpm 8.x.x` to instead of npm. You can learn how to install the corresponding version from its official website. 
- [pnpm](https://pnpm.io/) diff --git a/kyuubi-server/web-ui/package-lock.json b/kyuubi-server/web-ui/package-lock.json index 0a2feeba118..352560cd703 100644 --- a/kyuubi-server/web-ui/package-lock.json +++ b/kyuubi-server/web-ui/package-lock.json @@ -1,18 +1,20 @@ { "name": "kyuubi-ui", - "version": "1.8.0-SNAPSHOT", + "version": "1.9.0-SNAPSHOT", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "kyuubi-ui", - "version": "1.8.0-SNAPSHOT", + "version": "1.9.0-SNAPSHOT", "dependencies": { "@element-plus/icons-vue": "^2.0.9", "axios": "^0.27.2", + "date-fns": "^2.29.3", "element-plus": "^2.2.12", "pinia": "^2.0.18", "pinia-plugin-persistedstate": "^2.1.1", + "swagger-ui-dist": "^5.6.2", "vue": "^3.2.37", "vue-i18n": "^9.2.2", "vue-router": "^4.1.3" @@ -20,10 +22,11 @@ "devDependencies": { "@iconify-json/ep": "^1.1.6", "@types/node": "^18.7.1", + "@types/swagger-ui-dist": "^3.30.1", "@typescript-eslint/eslint-plugin": "^5.33.0", "@typescript-eslint/parser": "^5.33.0", - "@vitejs/plugin-vue": "^3.0.0", - "@vitest/coverage-c8": "^0.22.0", + "@vitejs/plugin-vue": "^4.2.3", + "@vitest/coverage-v8": "^0.32.0", "@vue/eslint-config-prettier": "^7.0.0", "@vue/eslint-config-typescript": "^11.0.0", "@vue/test-utils": "^2.0.2", @@ -34,11 +37,24 @@ "prettier": "^2.7.1", "sass": "^1.54.4", "typescript": "^4.6.4", - "vite": "^3.0.0", - "vitest": "^0.22.0", + "vite": "^4.2.3", + "vitest": "^0.32.0", "vue-tsc": "^0.38.4" } }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/@babel/parser": { "version": "7.18.11", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.11.tgz", @@ -50,6 +66,17 @@ "node": ">=6.0.0" } }, + "node_modules/@babel/runtime": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.3.tgz", + "integrity": "sha512-XsDuspWKLUsxwCp6r7EhsExHtYfbe5oAGQ19kqngTdCPUoPQzOPdUbD/pB9PJiwb2ptYKQDjSJT3R6dC+EPqfQ==", + "dependencies": { + "regenerator-runtime": "^0.13.11" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@bcoe/v8-coverage": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", @@ -72,1751 +99,1914 @@ "vue": "^3.2.0" } }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.14.54.tgz", - "integrity": "sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==", + "node_modules/@esbuild/android-arm": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.19.tgz", + "integrity": "sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==", "cpu": [ - "loong64" + "arm" ], "dev": true, "optional": true, "os": [ - "linux" + "android" ], "engines": { "node": ">=12" } }, - "node_modules/@eslint/eslintrc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.3.0.tgz", - "integrity": "sha512-UWW0TMTmk2d7hLcWD1/e2g5HDM/HQ3csaLSqXCfqwh4uNDuNqlaKWXmEsL4Cs41Z0KnILNvwbHAah3C2yt06kw==", + 
"node_modules/@esbuild/android-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz", + "integrity": "sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==", + "cpu": [ + "arm64" + ], "dev": true, - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.3.2", - "globals": "^13.15.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, + "optional": true, + "os": [ + "android" + ], "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - } - }, - "node_modules/@floating-ui/core": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-0.7.3.tgz", - "integrity": "sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg==" - }, - "node_modules/@floating-ui/dom": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-0.5.4.tgz", - "integrity": "sha512-419BMceRLq0RrmTSDxn8hf9R3VCJv2K9PUfugh5JyEFmdjzDo+e8U5EdR8nzKq8Yj1htzLm3b6eQEEam3/rrtg==", - "dependencies": { - "@floating-ui/core": "^0.7.3" + "node": ">=12" } }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.10.4", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.10.4.tgz", - "integrity": "sha512-mXAIHxZT3Vcpg83opl1wGlVZ9xydbfZO3r5YfRSH6Gpp2J/PfdBP0wbDa2sO6/qRbcalpoevVyW6A/fI6LfeMw==", + "node_modules/@esbuild/android-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.19.tgz", + "integrity": "sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==", + "cpu": [ + "x64" + ], "dev": true, - "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", - "minimatch": "^3.0.4" - }, + "optional": true, + "os": [ + "android" + ], "engines": { - "node": ">=10.10.0" + "node": ">=12" } }, - "node_modules/@humanwhocodes/gitignore-to-minimatch": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/gitignore-to-minimatch/-/gitignore-to-minimatch-1.0.2.tgz", - "integrity": "sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==", + "node_modules/@esbuild/darwin-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz", + "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==", + "cpu": [ + "arm64" + ], "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", - "dev": true - }, - "node_modules/@iconify-json/ep": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@iconify-json/ep/-/ep-1.1.7.tgz", - "integrity": "sha512-GhXWVKalXFlrGgfrCXAgqBre5hv3pPAknuxyywmjamcrL5gl5Mq9WOZtuhb4cB6cJ5pMiKOMtegt73FheqWscA==", + "node_modules/@esbuild/darwin-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz", + 
"integrity": "sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==", + "cpu": [ + "x64" + ], "dev": true, - "dependencies": { - "@iconify/types": "*" + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@iconify/types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@iconify/types/-/types-1.1.0.tgz", - "integrity": "sha512-Jh0llaK2LRXQoYsorIH8maClebsnzTcve+7U3rQUSnC11X4jtPnFuyatqFLvMxZ8MLG8dB4zfHsbPfuvxluONw==", - "dev": true - }, - "node_modules/@intlify/core-base": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.2.2.tgz", - "integrity": "sha512-JjUpQtNfn+joMbrXvpR4hTF8iJQ2sEFzzK3KIESOx+f+uwIjgw20igOyaIdhfsVVBCds8ZM64MoeNSx+PHQMkA==", - "dependencies": { - "@intlify/devtools-if": "9.2.2", - "@intlify/message-compiler": "9.2.2", - "@intlify/shared": "9.2.2", - "@intlify/vue-devtools": "9.2.2" - }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz", + "integrity": "sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], "engines": { - "node": ">= 14" + "node": ">=12" } }, - "node_modules/@intlify/devtools-if": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/@intlify/devtools-if/-/devtools-if-9.2.2.tgz", - "integrity": "sha512-4ttr/FNO29w+kBbU7HZ/U0Lzuh2cRDhP8UlWOtV9ERcjHzuyXVZmjyleESK6eVP60tGC9QtQW9yZE+JeRhDHkg==", - "dependencies": { - "@intlify/shared": "9.2.2" - }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz", + "integrity": "sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], "engines": { - "node": ">= 14" + "node": ">=12" } }, - "node_modules/@intlify/message-compiler": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.2.2.tgz", - "integrity": "sha512-IUrQW7byAKN2fMBe8z6sK6riG1pue95e5jfokn8hA5Q3Bqy4MBJ5lJAofUsawQJYHeoPJ7svMDyBaVJ4d0GTtA==", - "dependencies": { - "@intlify/shared": "9.2.2", - "source-map": "0.6.1" - }, + "node_modules/@esbuild/linux-arm": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz", + "integrity": "sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 14" + "node": ">=12" } }, - "node_modules/@intlify/shared": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.2.2.tgz", - "integrity": "sha512-wRwTpsslgZS5HNyM7uDQYZtxnbI12aGiBZURX3BTR9RFIKKRWpllTsgzHWvj3HKm3Y2Sh5LPC1r0PDCKEhVn9Q==", + "node_modules/@esbuild/linux-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz", + "integrity": "sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 14" + "node": ">=12" } }, - 
"node_modules/@intlify/vue-devtools": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/@intlify/vue-devtools/-/vue-devtools-9.2.2.tgz", - "integrity": "sha512-+dUyqyCHWHb/UcvY1MlIpO87munedm3Gn6E9WWYdWrMuYLcoIoOEVDWSS8xSwtlPU+kA+MEQTP6Q1iI/ocusJg==", - "dependencies": { - "@intlify/core-base": "9.2.2", - "@intlify/shared": "9.2.2" - }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz", + "integrity": "sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 14" + "node": ">=12" } }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "node_modules/@esbuild/linux-loong64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz", + "integrity": "sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==", + "cpu": [ + "loong64" + ], "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "node_modules/@esbuild/linux-mips64el": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz", + "integrity": "sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==", + "cpu": [ + "mips64el" + ], "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=6.0.0" + "node": ">=12" } }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", - "dev": true - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", - "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", + "node_modules/@esbuild/linux-ppc64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz", + "integrity": "sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==", + "cpu": [ + "ppc64" + ], "dev": true, - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "node_modules/@esbuild/linux-riscv64": { + "version": "0.17.19", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz", + "integrity": "sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==", + "cpu": [ + "riscv64" + ], "dev": true, - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 8" + "node": ">=12" } }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "node_modules/@esbuild/linux-s390x": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz", + "integrity": "sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==", + "cpu": [ + "s390x" + ], "dev": true, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 8" + "node": ">=12" } }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "node_modules/@esbuild/linux-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz", + "integrity": "sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==", + "cpu": [ + "x64" + ], "dev": true, - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">= 8" + "node": ">=12" } }, - "node_modules/@popperjs/core": { - "name": "@sxzz/popperjs-es", - "version": "2.11.7", - "resolved": "https://registry.npmjs.org/@sxzz/popperjs-es/-/popperjs-es-2.11.7.tgz", - "integrity": "sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/popperjs" + "node_modules/@esbuild/netbsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz", + "integrity": "sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "node_modules/@esbuild/openbsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz", + "integrity": "sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==", + "cpu": [ + "x64" + ], "dev": true, + "optional": true, + "os": [ + "openbsd" + ], "engines": { - "node": ">= 10" + "node": ">=12" } }, - "node_modules/@types/chai": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.3.tgz", - "integrity": "sha512-hC7OMnszpxhZPduX+m+nrx+uFoLkWOMiR4oa/AZF3MuSETYTZmFfJAHqZEM8MVlvfG7BEUcgvtwoCTxBp6hm3g==", - "dev": true - }, - "node_modules/@types/chai-subset": { - "version": 
"1.3.3", - "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.3.tgz", - "integrity": "sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==", + "node_modules/@esbuild/sunos-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz", + "integrity": "sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==", + "cpu": [ + "x64" + ], "dev": true, - "dependencies": { - "@types/chai": "*" + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", - "dev": true - }, - "node_modules/@types/json-schema": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", - "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", - "dev": true - }, - "node_modules/@types/lodash": { - "version": "4.14.183", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.183.tgz", - "integrity": "sha512-UXavyuxzXKMqJPEpFPri6Ku5F9af6ZJXUneHhvQJxavrEjuHkFp2YnDWHcxJiG7hk8ZkWqjcyNeW1s/smZv5cw==" - }, - "node_modules/@types/lodash-es": { - "version": "4.17.6", - "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.6.tgz", - "integrity": "sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==", - "dependencies": { - "@types/lodash": "*" + "node_modules/@esbuild/win32-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz", + "integrity": "sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" } }, - "node_modules/@types/node": { - "version": "18.7.6", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.7.6.tgz", - "integrity": "sha512-EdxgKRXgYsNITy5mjjXjVE/CS8YENSdhiagGrLqjG0pvA2owgJ6i4l7wy/PFZGC0B1/H20lWKN7ONVDNYDZm7A==", - "dev": true + "node_modules/@esbuild/win32-ia32": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz", + "integrity": "sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } }, - "node_modules/@types/web-bluetooth": { - "version": "0.0.14", - "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.14.tgz", - "integrity": "sha512-5d2RhCard1nQUC3aHcq/gHzWYO6K0WJmAbjO7mQJgCQKtZpgXxv1rOM6O/dBDhDYYVutk1sciOgNSe+5YyfM8A==" + "node_modules/@esbuild/win32-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz", + "integrity": "sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": 
"5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.33.1.tgz", - "integrity": "sha512-S1iZIxrTvKkU3+m63YUOxYPKaP+yWDQrdhxTglVDVEVBf+aCSw85+BmJnyUaQQsk5TXFG/LpBu9fa+LrAQ91fQ==", + "node_modules/@eslint/eslintrc": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.3.0.tgz", + "integrity": "sha512-UWW0TMTmk2d7hLcWD1/e2g5HDM/HQ3csaLSqXCfqwh4uNDuNqlaKWXmEsL4Cs41Z0KnILNvwbHAah3C2yt06kw==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "5.33.1", - "@typescript-eslint/type-utils": "5.33.1", - "@typescript-eslint/utils": "5.33.1", - "debug": "^4.3.4", - "functional-red-black-tree": "^1.0.1", + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.3.2", + "globals": "^13.15.0", "ignore": "^5.2.0", - "regexpp": "^3.2.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^5.0.0", - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } } }, - "node_modules/@typescript-eslint/parser": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.33.1.tgz", - "integrity": "sha512-IgLLtW7FOzoDlmaMoXdxG8HOCByTBXrB1V2ZQYSEV1ggMmJfAkMWTwUjjzagS6OkfpySyhKFkBw7A9jYmcHpZA==", - "dev": true, + "node_modules/@floating-ui/core": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-0.7.3.tgz", + "integrity": "sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg==" + }, + "node_modules/@floating-ui/dom": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-0.5.4.tgz", + "integrity": "sha512-419BMceRLq0RrmTSDxn8hf9R3VCJv2K9PUfugh5JyEFmdjzDo+e8U5EdR8nzKq8Yj1htzLm3b6eQEEam3/rrtg==", "dependencies": { - "@typescript-eslint/scope-manager": "5.33.1", - "@typescript-eslint/types": "5.33.1", - "@typescript-eslint/typescript-estree": "5.33.1", - "debug": "^4.3.4" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "@floating-ui/core": "^0.7.3" } }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.33.1.tgz", - "integrity": "sha512-8ibcZSqy4c5m69QpzJn8XQq9NnqAToC8OdH/W6IXPXv83vRyEDPYLdjAlUx8h/rbusq6MkW4YdQzURGOqsn3CA==", + "node_modules/@humanwhocodes/config-array": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.10.4.tgz", + "integrity": "sha512-mXAIHxZT3Vcpg83opl1wGlVZ9xydbfZO3r5YfRSH6Gpp2J/PfdBP0wbDa2sO6/qRbcalpoevVyW6A/fI6LfeMw==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.33.1", - "@typescript-eslint/visitor-keys": "5.33.1" + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.4" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": 
"opencollective", - "url": "https://opencollective.com/typescript-eslint" + "node": ">=10.10.0" } }, - "node_modules/@typescript-eslint/type-utils": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.33.1.tgz", - "integrity": "sha512-X3pGsJsD8OiqhNa5fim41YtlnyiWMF/eKsEZGsHID2HcDqeSC5yr/uLOeph8rNF2/utwuI0IQoAK3fpoxcLl2g==", + "node_modules/@humanwhocodes/gitignore-to-minimatch": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/gitignore-to-minimatch/-/gitignore-to-minimatch-1.0.2.tgz", + "integrity": "sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==", "dev": true, - "dependencies": { - "@typescript-eslint/utils": "5.33.1", - "debug": "^4.3.4", - "tsutils": "^3.21.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "type": "github", + "url": "https://github.com/sponsors/nzakas" } }, - "node_modules/@typescript-eslint/types": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.33.1.tgz", - "integrity": "sha512-7K6MoQPQh6WVEkMrMW5QOA5FO+BOwzHSNd0j3+BlBwd6vtzfZceJ8xJ7Um2XDi/O3umS8/qDX6jdy2i7CijkwQ==", + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "dev": true + }, + "node_modules/@iconify-json/ep": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@iconify-json/ep/-/ep-1.1.7.tgz", + "integrity": "sha512-GhXWVKalXFlrGgfrCXAgqBre5hv3pPAknuxyywmjamcrL5gl5Mq9WOZtuhb4cB6cJ5pMiKOMtegt73FheqWscA==", "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "dependencies": { + "@iconify/types": "*" } }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.33.1.tgz", - "integrity": "sha512-JOAzJ4pJ+tHzA2pgsWQi4804XisPHOtbvwUyqsuuq8+y5B5GMZs7lI1xDWs6V2d7gE/Ez5bTGojSK12+IIPtXA==", - "dev": true, + "node_modules/@iconify/types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@iconify/types/-/types-1.1.0.tgz", + "integrity": "sha512-Jh0llaK2LRXQoYsorIH8maClebsnzTcve+7U3rQUSnC11X4jtPnFuyatqFLvMxZ8MLG8dB4zfHsbPfuvxluONw==", + "dev": true + }, + "node_modules/@intlify/core-base": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.2.2.tgz", + "integrity": "sha512-JjUpQtNfn+joMbrXvpR4hTF8iJQ2sEFzzK3KIESOx+f+uwIjgw20igOyaIdhfsVVBCds8ZM64MoeNSx+PHQMkA==", "dependencies": { - "@typescript-eslint/types": "5.33.1", - "@typescript-eslint/visitor-keys": "5.33.1", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "@intlify/devtools-if": "9.2.2", + "@intlify/message-compiler": "9.2.2", + "@intlify/shared": "9.2.2", + "@intlify/vue-devtools": "9.2.2" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">= 14" } }, - "node_modules/@typescript-eslint/utils": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.33.1.tgz", - "integrity": "sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==", - "dev": true, + "node_modules/@intlify/devtools-if": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@intlify/devtools-if/-/devtools-if-9.2.2.tgz", + "integrity": "sha512-4ttr/FNO29w+kBbU7HZ/U0Lzuh2cRDhP8UlWOtV9ERcjHzuyXVZmjyleESK6eVP60tGC9QtQW9yZE+JeRhDHkg==", "dependencies": { - "@types/json-schema": "^7.0.9", - "@typescript-eslint/scope-manager": "5.33.1", - "@typescript-eslint/types": "5.33.1", - "@typescript-eslint/typescript-estree": "5.33.1", - "eslint-scope": "^5.1.1", - "eslint-utils": "^3.0.0" + "@intlify/shared": "9.2.2" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + "node": ">= 14" } }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.33.1.tgz", - "integrity": "sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==", - "dev": true, + "node_modules/@intlify/message-compiler": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.2.2.tgz", + "integrity": "sha512-IUrQW7byAKN2fMBe8z6sK6riG1pue95e5jfokn8hA5Q3Bqy4MBJ5lJAofUsawQJYHeoPJ7svMDyBaVJ4d0GTtA==", "dependencies": { - "@typescript-eslint/types": "5.33.1", - "eslint-visitor-keys": "^3.3.0" + "@intlify/shared": "9.2.2", + "source-map": "0.6.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">= 14" + } + }, + "node_modules/@intlify/shared": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.2.2.tgz", + "integrity": "sha512-wRwTpsslgZS5HNyM7uDQYZtxnbI12aGiBZURX3BTR9RFIKKRWpllTsgzHWvj3HKm3Y2Sh5LPC1r0PDCKEhVn9Q==", + "engines": { + "node": ">= 14" + } + }, + "node_modules/@intlify/vue-devtools": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@intlify/vue-devtools/-/vue-devtools-9.2.2.tgz", + "integrity": "sha512-+dUyqyCHWHb/UcvY1MlIpO87munedm3Gn6E9WWYdWrMuYLcoIoOEVDWSS8xSwtlPU+kA+MEQTP6Q1iI/ocusJg==", + "dependencies": { + "@intlify/core-base": "9.2.2", + "@intlify/shared": "9.2.2" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "engines": { + "node": ">= 14" } }, - "node_modules/@vitejs/plugin-vue": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-3.0.3.tgz", - "integrity": "sha512-U4zNBlz9mg+TA+i+5QPc3N5lQvdUXENZLO2h0Wdzp56gI1MWhqJOv+6R+d4kOzoaSSq6TnGPBdZAXKOe4lXy6g==", + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, "engines": { - "node": "^14.18.0 || >=16.0.0" - }, - "peerDependencies": { - "vite": "^3.0.0", - "vue": "^3.2.25" + "node": ">=8" } }, - "node_modules/@vitest/coverage-c8": { - "version": 
"0.22.0", - "resolved": "https://registry.npmjs.org/@vitest/coverage-c8/-/coverage-c8-0.22.0.tgz", - "integrity": "sha512-jwW6b8U+h9nbzQfKoRmpf2xjDg+mcAjLIdVUrZGhjTnIdekGfvoqFoeiXzsLv2HwYBeFi4943lYUftuj8qD1FQ==", + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", "dev": true, "dependencies": { - "c8": "^7.12.0", - "vitest": "0.22.0" + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" }, - "funding": { - "url": "https://github.com/sponsors/antfu" + "engines": { + "node": ">=6.0.0" } }, - "node_modules/@volar/code-gen": { - "version": "0.38.9", - "resolved": "https://registry.npmjs.org/@volar/code-gen/-/code-gen-0.38.9.tgz", - "integrity": "sha512-n6LClucfA+37rQeskvh9vDoZV1VvCVNy++MAPKj2dT4FT+Fbmty/SDQqnsEBtdEe6E3OQctFvA/IcKsx3Mns0A==", + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", + "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true, - "dependencies": { - "@volar/source-map": "0.38.9" + "engines": { + "node": ">=6.0.0" } }, - "node_modules/@volar/source-map": { - "version": "0.38.9", - "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-0.38.9.tgz", - "integrity": "sha512-ba0UFoHDYry+vwKdgkWJ6xlQT+8TFtZg1zj9tSjj4PykW1JZDuM0xplMotLun4h3YOoYfY9K1huY5gvxmrNLIw==", + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.14", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", + "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", "dev": true }, - "node_modules/@volar/vue-code-gen": { - "version": "0.38.9", - "resolved": "https://registry.npmjs.org/@volar/vue-code-gen/-/vue-code-gen-0.38.9.tgz", - "integrity": "sha512-tzj7AoarFBKl7e41MR006ncrEmNPHALuk8aG4WdDIaG387X5//5KhWC5Ff3ZfB2InGSeNT+CVUd74M0gS20rjA==", + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.15", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", + "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", "dev": true, "dependencies": { - "@volar/code-gen": "0.38.9", - "@volar/source-map": "0.38.9", - "@vue/compiler-core": "^3.2.37", - "@vue/compiler-dom": "^3.2.37", - "@vue/shared": "^3.2.37" + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" } }, - "node_modules/@volar/vue-typescript": { - "version": "0.38.9", - "resolved": "https://registry.npmjs.org/@volar/vue-typescript/-/vue-typescript-0.38.9.tgz", - "integrity": "sha512-iJMQGU91ADi98u8V1vXd2UBmELDAaeSP0ZJaFjwosClQdKlJQYc6MlxxKfXBZisHqfbhdtrGRyaryulnYtliZw==", + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + 
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, "dependencies": { - "@volar/code-gen": "0.38.9", - "@volar/source-map": "0.38.9", - "@volar/vue-code-gen": "0.38.9", - "@vue/compiler-sfc": "^3.2.37", - "@vue/reactivity": "^3.2.37" + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" } }, - "node_modules/@vue/compiler-core": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.2.37.tgz", - "integrity": "sha512-81KhEjo7YAOh0vQJoSmAD68wLfYqJvoiD4ulyedzF+OEk/bk6/hx3fTNVfuzugIIaTrOx4PGx6pAiBRe5e9Zmg==", - "dependencies": { - "@babel/parser": "^7.16.4", - "@vue/shared": "3.2.37", - "estree-walker": "^2.0.2", - "source-map": "^0.6.1" + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" } }, - "node_modules/@vue/compiler-dom": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.2.37.tgz", - "integrity": "sha512-yxJLH167fucHKxaqXpYk7x8z7mMEnXOw3G2q62FTkmsvNxu4FQSu5+3UMb+L7fjKa26DEzhrmCxAgFLLIzVfqQ==", + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, "dependencies": { - "@vue/compiler-core": "3.2.37", - "@vue/shared": "3.2.37" + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" } }, - "node_modules/@vue/compiler-sfc": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.2.37.tgz", - "integrity": "sha512-+7i/2+9LYlpqDv+KTtWhOZH+pa8/HnX/905MdVmAcI/mPQOBwkHHIzrsEsucyOIZQYMkXUiTkmZq5am/NyXKkg==", - "dependencies": { - "@babel/parser": "^7.16.4", - "@vue/compiler-core": "3.2.37", - "@vue/compiler-dom": "3.2.37", - "@vue/compiler-ssr": "3.2.37", - "@vue/reactivity-transform": "3.2.37", - "@vue/shared": "3.2.37", - "estree-walker": "^2.0.2", - "magic-string": "^0.25.7", - "postcss": "^8.1.10", - "source-map": "^0.6.1" - } - }, - "node_modules/@vue/compiler-sfc/node_modules/magic-string": { - "version": "0.25.9", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz", - "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", - "dependencies": { - "sourcemap-codec": "^1.4.8" - } - }, - "node_modules/@vue/compiler-ssr": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.2.37.tgz", - "integrity": "sha512-7mQJD7HdXxQjktmsWp/J67lThEIcxLemz1Vb5I6rYJHR5vI+lON3nPGOH3ubmbvYGt8xEUaAr1j7/tIFWiEOqw==", - "dependencies": { - "@vue/compiler-dom": "3.2.37", - "@vue/shared": "3.2.37" + "node_modules/@popperjs/core": { + "name": "@sxzz/popperjs-es", + "version": "2.11.7", + "resolved": "https://registry.npmjs.org/@sxzz/popperjs-es/-/popperjs-es-2.11.7.tgz", + "integrity": "sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/popperjs" } }, - "node_modules/@vue/devtools-api": { - "version": "6.2.1", - 
"resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.2.1.tgz", - "integrity": "sha512-OEgAMeQXvCoJ+1x8WyQuVZzFo0wcyCmUR3baRVLmKBo1LmYZWMlRiXlux5jd0fqVJu6PfDbOrZItVqUEzLobeQ==" - }, - "node_modules/@vue/eslint-config-prettier": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/@vue/eslint-config-prettier/-/eslint-config-prettier-7.0.0.tgz", - "integrity": "sha512-/CTc6ML3Wta1tCe1gUeO0EYnVXfo3nJXsIhZ8WJr3sov+cGASr6yuiibJTL6lmIBm7GobopToOuB3B6AWyV0Iw==", + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", "dev": true, - "dependencies": { - "eslint-config-prettier": "^8.3.0", - "eslint-plugin-prettier": "^4.0.0" - }, - "peerDependencies": { - "eslint": ">= 7.28.0", - "prettier": ">= 2.0.0" + "engines": { + "node": ">= 10" } }, - "node_modules/@vue/eslint-config-typescript": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/@vue/eslint-config-typescript/-/eslint-config-typescript-11.0.0.tgz", - "integrity": "sha512-txuRzxnQVmtUvvy9UyWUy9sHWXNeRPGmSPqP53hRtaiUeCTAondI9Ho9GQYI/8/eWljYOST7iA4Aa8sANBkWaA==", + "node_modules/@types/chai": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.5.tgz", + "integrity": "sha512-mEo1sAde+UCE6b2hxn332f1g1E8WfYRu6p5SvTKr2ZKC1f7gFJXk4h5PyGP9Dt6gCaG8y8XhwnXWC6Iy2cmBng==", + "dev": true + }, + "node_modules/@types/chai-subset": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.3.tgz", + "integrity": "sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==", "dev": true, "dependencies": { - "@typescript-eslint/eslint-plugin": "^5.0.0", - "@typescript-eslint/parser": "^5.0.0", - "vue-eslint-parser": "^9.0.0" - }, - "engines": { - "node": "^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0", - "eslint-plugin-vue": "^9.0.0", - "typescript": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "@types/chai": "*" } }, - "node_modules/@vue/reactivity": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.2.37.tgz", - "integrity": "sha512-/7WRafBOshOc6m3F7plwzPeCu/RCVv9uMpOwa/5PiY1Zz+WLVRWiy0MYKwmg19KBdGtFWsmZ4cD+LOdVPcs52A==", - "dependencies": { - "@vue/shared": "3.2.37" - } + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", + "dev": true }, - "node_modules/@vue/reactivity-transform": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/reactivity-transform/-/reactivity-transform-3.2.37.tgz", - "integrity": "sha512-IWopkKEb+8qpu/1eMKVeXrK0NLw9HicGviJzhJDEyfxTR9e1WtpnnbYkJWurX6WwoFP0sz10xQg8yL8lgskAZg==", - "dependencies": { - "@babel/parser": "^7.16.4", - "@vue/compiler-core": "3.2.37", - "@vue/shared": "3.2.37", - "estree-walker": "^2.0.2", - "magic-string": "^0.25.7" - } + "node_modules/@types/json-schema": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", + "integrity": 
"sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", + "dev": true }, - "node_modules/@vue/reactivity-transform/node_modules/magic-string": { - "version": "0.25.9", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz", - "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", - "dependencies": { - "sourcemap-codec": "^1.4.8" - } + "node_modules/@types/lodash": { + "version": "4.14.183", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.183.tgz", + "integrity": "sha512-UXavyuxzXKMqJPEpFPri6Ku5F9af6ZJXUneHhvQJxavrEjuHkFp2YnDWHcxJiG7hk8ZkWqjcyNeW1s/smZv5cw==" }, - "node_modules/@vue/runtime-core": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.2.37.tgz", - "integrity": "sha512-JPcd9kFyEdXLl/i0ClS7lwgcs0QpUAWj+SKX2ZC3ANKi1U4DOtiEr6cRqFXsPwY5u1L9fAjkinIdB8Rz3FoYNQ==", + "node_modules/@types/lodash-es": { + "version": "4.17.6", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.6.tgz", + "integrity": "sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==", "dependencies": { - "@vue/reactivity": "3.2.37", - "@vue/shared": "3.2.37" + "@types/lodash": "*" } }, - "node_modules/@vue/runtime-dom": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.2.37.tgz", - "integrity": "sha512-HimKdh9BepShW6YozwRKAYjYQWg9mQn63RGEiSswMbW+ssIht1MILYlVGkAGGQbkhSh31PCdoUcfiu4apXJoPw==", - "dependencies": { - "@vue/runtime-core": "3.2.37", - "@vue/shared": "3.2.37", - "csstype": "^2.6.8" - } + "node_modules/@types/node": { + "version": "18.7.6", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.7.6.tgz", + "integrity": "sha512-EdxgKRXgYsNITy5mjjXjVE/CS8YENSdhiagGrLqjG0pvA2owgJ6i4l7wy/PFZGC0B1/H20lWKN7ONVDNYDZm7A==", + "dev": true }, - "node_modules/@vue/server-renderer": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.2.37.tgz", - "integrity": "sha512-kLITEJvaYgZQ2h47hIzPh2K3jG8c1zCVbp/o/bzQOyvzaKiCquKS7AaioPI28GNxIsE/zSx+EwWYsNxDCX95MA==", - "dependencies": { - "@vue/compiler-ssr": "3.2.37", - "@vue/shared": "3.2.37" - }, - "peerDependencies": { - "vue": "3.2.37" - } + "node_modules/@types/swagger-ui-dist": { + "version": "3.30.1", + "resolved": "https://registry.npmjs.org/@types/swagger-ui-dist/-/swagger-ui-dist-3.30.1.tgz", + "integrity": "sha512-wWojDensMF33dSrhak4iWSPOsWBbvf+rSJ6VWQ7ohQbGdKAiT2IwUexrDZkMvf3+vVAPVnNFDRDtxADFszbh+Q==", + "dev": true }, - "node_modules/@vue/shared": { - "version": "3.2.37", - "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.2.37.tgz", - "integrity": "sha512-4rSJemR2NQIo9Klm1vabqWjD8rs/ZaJSzMxkMNeJS6lHiUjjUeYFbooN19NgFjztubEKh3WlZUeOLVdbbUWHsw==" + "node_modules/@types/web-bluetooth": { + "version": "0.0.14", + "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.14.tgz", + "integrity": "sha512-5d2RhCard1nQUC3aHcq/gHzWYO6K0WJmAbjO7mQJgCQKtZpgXxv1rOM6O/dBDhDYYVutk1sciOgNSe+5YyfM8A==" }, - "node_modules/@vue/test-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.0.2.tgz", - "integrity": "sha512-E2P4oXSaWDqTZNbmKZFVLrNN/siVN78YkEqs7pHryWerrlZR9bBFLWdJwRoguX45Ru6HxIflzKl4vQvwRMwm5g==", + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.33.1", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.33.1.tgz", + "integrity": "sha512-S1iZIxrTvKkU3+m63YUOxYPKaP+yWDQrdhxTglVDVEVBf+aCSw85+BmJnyUaQQsk5TXFG/LpBu9fa+LrAQ91fQ==", "dev": true, - "peerDependencies": { - "vue": "^3.0.1" - } - }, - "node_modules/@vueuse/core": { - "version": "8.9.4", - "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-8.9.4.tgz", - "integrity": "sha512-B/Mdj9TK1peFyWaPof+Zf/mP9XuGAngaJZBwPaXBvU3aCTZlx3ltlrFFFyMV4iGBwsjSCeUCgZrtkEj9dS2Y3Q==", "dependencies": { - "@types/web-bluetooth": "^0.0.14", - "@vueuse/metadata": "8.9.4", - "@vueuse/shared": "8.9.4", - "vue-demi": "*" + "@typescript-eslint/scope-manager": "5.33.1", + "@typescript-eslint/type-utils": "5.33.1", + "@typescript-eslint/utils": "5.33.1", + "debug": "^4.3.4", + "functional-red-black-tree": "^1.0.1", + "ignore": "^5.2.0", + "regexpp": "^3.2.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/antfu" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@vue/composition-api": "^1.1.0", - "vue": "^2.6.0 || ^3.2.0" + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" }, "peerDependenciesMeta": { - "@vue/composition-api": { - "optional": true - }, - "vue": { + "typescript": { "optional": true } } }, - "node_modules/@vueuse/core/node_modules/@vueuse/shared": { - "version": "8.9.4", - "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-8.9.4.tgz", - "integrity": "sha512-wt+T30c4K6dGRMVqPddexEVLa28YwxW5OFIPmzUHICjphfAuBFTTdDoyqREZNDOFJZ44ARH1WWQNCUK8koJ+Ag==", + "node_modules/@typescript-eslint/parser": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.33.1.tgz", + "integrity": "sha512-IgLLtW7FOzoDlmaMoXdxG8HOCByTBXrB1V2ZQYSEV1ggMmJfAkMWTwUjjzagS6OkfpySyhKFkBw7A9jYmcHpZA==", + "dev": true, "dependencies": { - "vue-demi": "*" + "@typescript-eslint/scope-manager": "5.33.1", + "@typescript-eslint/types": "5.33.1", + "@typescript-eslint/typescript-estree": "5.33.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/antfu" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@vue/composition-api": "^1.1.0", - "vue": "^2.6.0 || ^3.2.0" + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" }, "peerDependenciesMeta": { - "@vue/composition-api": { - "optional": true - }, - "vue": { + "typescript": { "optional": true } } }, - "node_modules/@vueuse/core/node_modules/vue-demi": { - "version": "0.13.8", - "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.13.8.tgz", - "integrity": "sha512-Vy1zbZhCOdsmvGR6tJhAvO5vhP7eiS8xkbYQSoVa7o6KlIy3W8Rc53ED4qI4qpeRDjv3mLfXSEpYU6Yq4pgXRg==", - "hasInstallScript": true, - "bin": { - "vue-demi-fix": "bin/vue-demi-fix.js", - "vue-demi-switch": "bin/vue-demi-switch.js" + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.33.1.tgz", + "integrity": "sha512-8ibcZSqy4c5m69QpzJn8XQq9NnqAToC8OdH/W6IXPXv83vRyEDPYLdjAlUx8h/rbusq6MkW4YdQzURGOqsn3CA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.33.1", + "@typescript-eslint/visitor-keys": "5.33.1" }, "engines": { - "node": ">=12" + "node": "^12.22.0 || ^14.17.0 || 
>=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/antfu" - }, - "peerDependencies": { - "@vue/composition-api": "^1.0.0-rc.1", - "vue": "^3.0.0-0 || ^2.6.0" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.33.1.tgz", + "integrity": "sha512-X3pGsJsD8OiqhNa5fim41YtlnyiWMF/eKsEZGsHID2HcDqeSC5yr/uLOeph8rNF2/utwuI0IQoAK3fpoxcLl2g==", + "dev": true, + "dependencies": { + "@typescript-eslint/utils": "5.33.1", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" }, "peerDependenciesMeta": { - "@vue/composition-api": { + "typescript": { "optional": true } } }, - "node_modules/@vueuse/metadata": { - "version": "8.9.4", - "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-8.9.4.tgz", - "integrity": "sha512-IwSfzH80bnJMzqhaapqJl9JRIiyQU0zsRGEgnxN6jhq7992cPUJIRfV+JHRIZXjYqbwt07E1gTEp0R0zPJ1aqw==", + "node_modules/@typescript-eslint/types": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.33.1.tgz", + "integrity": "sha512-7K6MoQPQh6WVEkMrMW5QOA5FO+BOwzHSNd0j3+BlBwd6vtzfZceJ8xJ7Um2XDi/O3umS8/qDX6jdy2i7CijkwQ==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, "funding": { - "url": "https://github.com/sponsors/antfu" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/abab": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", - "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", - "dev": true - }, - "node_modules/acorn": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.0.tgz", - "integrity": "sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==", + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.33.1.tgz", + "integrity": "sha512-JOAzJ4pJ+tHzA2pgsWQi4804XisPHOtbvwUyqsuuq8+y5B5GMZs7lI1xDWs6V2d7gE/Ez5bTGojSK12+IIPtXA==", "dev": true, - "bin": { - "acorn": "bin/acorn" + "dependencies": { + "@typescript-eslint/types": "5.33.1", + "@typescript-eslint/visitor-keys": "5.33.1", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" }, "engines": { - "node": ">=0.4.0" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/acorn-globals": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-6.0.0.tgz", - "integrity": "sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==", + "node_modules/@typescript-eslint/utils": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.33.1.tgz", + "integrity": 
"sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==", "dev": true, "dependencies": { - "acorn": "^7.1.1", - "acorn-walk": "^7.1.1" + "@types/json-schema": "^7.0.9", + "@typescript-eslint/scope-manager": "5.33.1", + "@typescript-eslint/types": "5.33.1", + "@typescript-eslint/typescript-estree": "5.33.1", + "eslint-scope": "^5.1.1", + "eslint-utils": "^3.0.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, - "node_modules/acorn-globals/node_modules/acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.33.1.tgz", + "integrity": "sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==", "dev": true, - "bin": { - "acorn": "bin/acorn" + "dependencies": { + "@typescript-eslint/types": "5.33.1", + "eslint-visitor-keys": "^3.3.0" }, "engines": { - "node": ">=0.4.0" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "node_modules/@vitejs/plugin-vue": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-4.2.3.tgz", + "integrity": "sha512-R6JDUfiZbJA9cMiguQ7jxALsgiprjBeHL5ikpXfJCH62pPHtI+JdJ5xWj6Ev73yXSlYl86+blXn1kZHQ7uElxw==", "dev": true, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + "vite": "^4.0.0", + "vue": "^3.2.25" } }, - "node_modules/acorn-walk": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz", - "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", + "node_modules/@vitest/coverage-v8": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-0.32.0.tgz", + "integrity": "sha512-VXXlWq9X/NbsoP/l/CHLBjutsFFww1UY1qEhzGjn/DY7Tqe+z0Nu8XKc8im/XUAmjiWsh2XV7sy/F0IKAl4eaw==", "dev": true, - "engines": { - "node": ">=0.4.0" + "dependencies": { + "@ampproject/remapping": "^2.2.1", + "@bcoe/v8-coverage": "^0.2.3", + "istanbul-lib-coverage": "^3.2.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.1", + "istanbul-reports": "^3.1.5", + "magic-string": "^0.30.0", + "picocolors": "^1.0.0", + "std-env": "^3.3.2", + "test-exclude": "^6.0.0", + "v8-to-istanbul": "^9.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "vitest": ">=0.32.0 <1" } }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "node_modules/@vitest/expect": { + "version": 
"0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-0.32.0.tgz", + "integrity": "sha512-VxVHhIxKw9Lux+O9bwLEEk2gzOUe93xuFHy9SzYWnnoYZFYg1NfBtnfnYWiJN7yooJ7KNElCK5YtA7DTZvtXtg==", "dev": true, "dependencies": { - "debug": "4" + "@vitest/spy": "0.32.0", + "@vitest/utils": "0.32.0", + "chai": "^4.3.7" }, - "engines": { - "node": ">= 6.0.0" + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "node_modules/@vitest/runner": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-0.32.0.tgz", + "integrity": "sha512-QpCmRxftHkr72xt5A08xTEs9I4iWEXIOCHWhQQguWOKE4QH7DXSKZSOFibuwEIMAD7G0ERvtUyQn7iPWIqSwmw==", "dev": true, "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "@vitest/utils": "0.32.0", + "concordance": "^5.0.4", + "p-limit": "^4.0.0", + "pathe": "^1.1.0" }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "url": "https://opencollective.com/vitest" } }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/@vitest/runner/node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", "dev": true, + "dependencies": { + "yocto-queue": "^1.0.0" + }, "engines": { - "node": ">=8" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "node_modules/@vitest/runner/node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "dev": true, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@vitest/snapshot": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-0.32.0.tgz", + "integrity": "sha512-yCKorPWjEnzpUxQpGlxulujTcSPgkblwGzAUEL+z01FTUg/YuCDZ8dxr9sHA08oO2EwxzHXNLjQKWJ2zc2a19Q==", "dev": true, "dependencies": { - "color-convert": "^2.0.1" + "magic-string": "^0.30.0", + "pathe": "^1.1.0", + "pretty-format": "^27.5.1" }, - "engines": { - "node": ">=8" + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-0.32.0.tgz", + "integrity": "sha512-MruAPlM0uyiq3d53BkwTeShXY0rYEfhNGQzVO5GHBmmX3clsxcWp79mMnkOVcV244sNTeDcHbcPFWIjOI4tZvw==", + "dev": true, + "dependencies": { + "tinyspy": "^2.1.0" }, "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + 
"url": "https://opencollective.com/vitest" } }, - "node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "node_modules/@vitest/utils": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-0.32.0.tgz", + "integrity": "sha512-53yXunzx47MmbuvcOPpLaVljHaeSu1G2dHdmy7+9ngMnQIkBQcvwOcoclWFnxDMxFbnq8exAfh3aKSZaK71J5A==", "dev": true, "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "concordance": "^5.0.4", + "loupe": "^2.3.6", + "pretty-format": "^27.5.1" }, - "engines": { - "node": ">= 8" + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "node_modules/@volar/code-gen": { + "version": "0.38.9", + "resolved": "https://registry.npmjs.org/@volar/code-gen/-/code-gen-0.38.9.tgz", + "integrity": "sha512-n6LClucfA+37rQeskvh9vDoZV1VvCVNy++MAPKj2dT4FT+Fbmty/SDQqnsEBtdEe6E3OQctFvA/IcKsx3Mns0A==", + "dev": true, + "dependencies": { + "@volar/source-map": "0.38.9" + } + }, + "node_modules/@volar/source-map": { + "version": "0.38.9", + "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-0.38.9.tgz", + "integrity": "sha512-ba0UFoHDYry+vwKdgkWJ6xlQT+8TFtZg1zj9tSjj4PykW1JZDuM0xplMotLun4h3YOoYfY9K1huY5gvxmrNLIw==", "dev": true }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "node_modules/@volar/vue-code-gen": { + "version": "0.38.9", + "resolved": "https://registry.npmjs.org/@volar/vue-code-gen/-/vue-code-gen-0.38.9.tgz", + "integrity": "sha512-tzj7AoarFBKl7e41MR006ncrEmNPHALuk8aG4WdDIaG387X5//5KhWC5Ff3ZfB2InGSeNT+CVUd74M0gS20rjA==", "dev": true, - "engines": { - "node": ">=8" + "dependencies": { + "@volar/code-gen": "0.38.9", + "@volar/source-map": "0.38.9", + "@vue/compiler-core": "^3.2.37", + "@vue/compiler-dom": "^3.2.37", + "@vue/shared": "^3.2.37" } }, - "node_modules/assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "node_modules/@volar/vue-typescript": { + "version": "0.38.9", + "resolved": "https://registry.npmjs.org/@volar/vue-typescript/-/vue-typescript-0.38.9.tgz", + "integrity": "sha512-iJMQGU91ADi98u8V1vXd2UBmELDAaeSP0ZJaFjwosClQdKlJQYc6MlxxKfXBZisHqfbhdtrGRyaryulnYtliZw==", "dev": true, - "engines": { - "node": "*" + "dependencies": { + "@volar/code-gen": "0.38.9", + "@volar/source-map": "0.38.9", + "@volar/vue-code-gen": "0.38.9", + "@vue/compiler-sfc": "^3.2.37", + "@vue/reactivity": "^3.2.37" } }, - "node_modules/async-validator": { - "version": "4.2.5", - "resolved": "https://registry.npmjs.org/async-validator/-/async-validator-4.2.5.tgz", - "integrity": "sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==" + "node_modules/@vue/compiler-core": { + "version": "3.2.37", + "resolved": 
"https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.2.37.tgz", + "integrity": "sha512-81KhEjo7YAOh0vQJoSmAD68wLfYqJvoiD4ulyedzF+OEk/bk6/hx3fTNVfuzugIIaTrOx4PGx6pAiBRe5e9Zmg==", + "dependencies": { + "@babel/parser": "^7.16.4", + "@vue/shared": "3.2.37", + "estree-walker": "^2.0.2", + "source-map": "^0.6.1" + } }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + "node_modules/@vue/compiler-dom": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.2.37.tgz", + "integrity": "sha512-yxJLH167fucHKxaqXpYk7x8z7mMEnXOw3G2q62FTkmsvNxu4FQSu5+3UMb+L7fjKa26DEzhrmCxAgFLLIzVfqQ==", + "dependencies": { + "@vue/compiler-core": "3.2.37", + "@vue/shared": "3.2.37" + } }, - "node_modules/axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "node_modules/@vue/compiler-sfc": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.2.37.tgz", + "integrity": "sha512-+7i/2+9LYlpqDv+KTtWhOZH+pa8/HnX/905MdVmAcI/mPQOBwkHHIzrsEsucyOIZQYMkXUiTkmZq5am/NyXKkg==", "dependencies": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.37", + "@vue/compiler-dom": "3.2.37", + "@vue/compiler-ssr": "3.2.37", + "@vue/reactivity-transform": "3.2.37", + "@vue/shared": "3.2.37", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7", + "postcss": "^8.1.10", + "source-map": "^0.6.1" } }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "node_modules/@vue/compiler-sfc/node_modules/magic-string": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz", + "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", + "dependencies": { + "sourcemap-codec": "^1.4.8" + } }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "node_modules/@vue/compiler-ssr": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.2.37.tgz", + "integrity": "sha512-7mQJD7HdXxQjktmsWp/J67lThEIcxLemz1Vb5I6rYJHR5vI+lON3nPGOH3ubmbvYGt8xEUaAr1j7/tIFWiEOqw==", + "dependencies": { + "@vue/compiler-dom": "3.2.37", + "@vue/shared": "3.2.37" + } + }, + "node_modules/@vue/devtools-api": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.2.1.tgz", + "integrity": "sha512-OEgAMeQXvCoJ+1x8WyQuVZzFo0wcyCmUR3baRVLmKBo1LmYZWMlRiXlux5jd0fqVJu6PfDbOrZItVqUEzLobeQ==" + }, + "node_modules/@vue/eslint-config-prettier": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/@vue/eslint-config-prettier/-/eslint-config-prettier-7.0.0.tgz", + "integrity": 
"sha512-/CTc6ML3Wta1tCe1gUeO0EYnVXfo3nJXsIhZ8WJr3sov+cGASr6yuiibJTL6lmIBm7GobopToOuB3B6AWyV0Iw==", + "dev": true, + "dependencies": { + "eslint-config-prettier": "^8.3.0", + "eslint-plugin-prettier": "^4.0.0" + }, + "peerDependencies": { + "eslint": ">= 7.28.0", + "prettier": ">= 2.0.0" + } + }, + "node_modules/@vue/eslint-config-typescript": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/@vue/eslint-config-typescript/-/eslint-config-typescript-11.0.0.tgz", + "integrity": "sha512-txuRzxnQVmtUvvy9UyWUy9sHWXNeRPGmSPqP53hRtaiUeCTAondI9Ho9GQYI/8/eWljYOST7iA4Aa8sANBkWaA==", "dev": true, + "dependencies": { + "@typescript-eslint/eslint-plugin": "^5.0.0", + "@typescript-eslint/parser": "^5.0.0", + "vue-eslint-parser": "^9.0.0" + }, "engines": { - "node": ">=8" + "node": "^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0", + "eslint-plugin-vue": "^9.0.0", + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", - "dev": true + "node_modules/@vue/reactivity": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.2.37.tgz", + "integrity": "sha512-/7WRafBOshOc6m3F7plwzPeCu/RCVv9uMpOwa/5PiY1Zz+WLVRWiy0MYKwmg19KBdGtFWsmZ4cD+LOdVPcs52A==", + "dependencies": { + "@vue/shared": "3.2.37" + } }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, + "node_modules/@vue/reactivity-transform": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/reactivity-transform/-/reactivity-transform-3.2.37.tgz", + "integrity": "sha512-IWopkKEb+8qpu/1eMKVeXrK0NLw9HicGviJzhJDEyfxTR9e1WtpnnbYkJWurX6WwoFP0sz10xQg8yL8lgskAZg==", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "@babel/parser": "^7.16.4", + "@vue/compiler-core": "3.2.37", + "@vue/shared": "3.2.37", + "estree-walker": "^2.0.2", + "magic-string": "^0.25.7" } }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "node_modules/@vue/reactivity-transform/node_modules/magic-string": { + "version": "0.25.9", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz", + "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==", + "dependencies": { + "sourcemap-codec": "^1.4.8" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.2.37.tgz", + "integrity": "sha512-JPcd9kFyEdXLl/i0ClS7lwgcs0QpUAWj+SKX2ZC3ANKi1U4DOtiEr6cRqFXsPwY5u1L9fAjkinIdB8Rz3FoYNQ==", + "dependencies": { + "@vue/reactivity": "3.2.37", + "@vue/shared": "3.2.37" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.2.37.tgz", + "integrity": 
"sha512-HimKdh9BepShW6YozwRKAYjYQWg9mQn63RGEiSswMbW+ssIht1MILYlVGkAGGQbkhSh31PCdoUcfiu4apXJoPw==", + "dependencies": { + "@vue/runtime-core": "3.2.37", + "@vue/shared": "3.2.37", + "csstype": "^2.6.8" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.2.37.tgz", + "integrity": "sha512-kLITEJvaYgZQ2h47hIzPh2K3jG8c1zCVbp/o/bzQOyvzaKiCquKS7AaioPI28GNxIsE/zSx+EwWYsNxDCX95MA==", + "dependencies": { + "@vue/compiler-ssr": "3.2.37", + "@vue/shared": "3.2.37" + }, + "peerDependencies": { + "vue": "3.2.37" + } + }, + "node_modules/@vue/shared": { + "version": "3.2.37", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.2.37.tgz", + "integrity": "sha512-4rSJemR2NQIo9Klm1vabqWjD8rs/ZaJSzMxkMNeJS6lHiUjjUeYFbooN19NgFjztubEKh3WlZUeOLVdbbUWHsw==" + }, + "node_modules/@vue/test-utils": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.0.2.tgz", + "integrity": "sha512-E2P4oXSaWDqTZNbmKZFVLrNN/siVN78YkEqs7pHryWerrlZR9bBFLWdJwRoguX45Ru6HxIflzKl4vQvwRMwm5g==", "dev": true, + "peerDependencies": { + "vue": "^3.0.1" + } + }, + "node_modules/@vueuse/core": { + "version": "8.9.4", + "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-8.9.4.tgz", + "integrity": "sha512-B/Mdj9TK1peFyWaPof+Zf/mP9XuGAngaJZBwPaXBvU3aCTZlx3ltlrFFFyMV4iGBwsjSCeUCgZrtkEj9dS2Y3Q==", "dependencies": { - "fill-range": "^7.0.1" + "@types/web-bluetooth": "^0.0.14", + "@vueuse/metadata": "8.9.4", + "@vueuse/shared": "8.9.4", + "vue-demi": "*" }, - "engines": { - "node": ">=8" + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.1.0", + "vue": "^2.6.0 || ^3.2.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + }, + "vue": { + "optional": true + } } }, - "node_modules/browser-process-hrtime": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz", - "integrity": "sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==", - "dev": true - }, - "node_modules/c8": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/c8/-/c8-7.12.0.tgz", - "integrity": "sha512-CtgQrHOkyxr5koX1wEUmN/5cfDa2ckbHRA4Gy5LAL0zaCFtVWJS5++n+w4/sr2GWGerBxgTjpKeDclk/Qk6W/A==", - "dev": true, + "node_modules/@vueuse/core/node_modules/@vueuse/shared": { + "version": "8.9.4", + "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-8.9.4.tgz", + "integrity": "sha512-wt+T30c4K6dGRMVqPddexEVLa28YwxW5OFIPmzUHICjphfAuBFTTdDoyqREZNDOFJZ44ARH1WWQNCUK8koJ+Ag==", "dependencies": { - "@bcoe/v8-coverage": "^0.2.3", - "@istanbuljs/schema": "^0.1.3", - "find-up": "^5.0.0", - "foreground-child": "^2.0.0", - "istanbul-lib-coverage": "^3.2.0", - "istanbul-lib-report": "^3.0.0", - "istanbul-reports": "^3.1.4", - "rimraf": "^3.0.2", - "test-exclude": "^6.0.0", - "v8-to-istanbul": "^9.0.0", - "yargs": "^16.2.0", - "yargs-parser": "^20.2.9" + "vue-demi": "*" }, - "bin": { - "c8": "bin/c8.js" + "funding": { + "url": "https://github.com/sponsors/antfu" }, - "engines": { - "node": ">=10.12.0" + "peerDependencies": { + "@vue/composition-api": "^1.1.0", + "vue": "^2.6.0 || ^3.2.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + }, + "vue": { + "optional": true + } } }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true, + "node_modules/@vueuse/core/node_modules/vue-demi": { + "version": "0.13.8", + "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.13.8.tgz", + "integrity": "sha512-Vy1zbZhCOdsmvGR6tJhAvO5vhP7eiS8xkbYQSoVa7o6KlIy3W8Rc53ED4qI4qpeRDjv3mLfXSEpYU6Yq4pgXRg==", + "hasInstallScript": true, + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, "engines": { - "node": ">=6" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } } }, - "node_modules/chai": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.6.tgz", - "integrity": "sha512-bbcp3YfHCUzMOvKqsztczerVgBKSsEijCySNlHHbX3VG1nskvqjz5Rfso1gGwD6w6oOV3eI60pKuMOV5MV7p3Q==", - "dev": true, - "dependencies": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^3.0.1", - "get-func-name": "^2.0.0", - "loupe": "^2.3.1", - "pathval": "^1.1.1", - "type-detect": "^4.0.5" - }, - "engines": { - "node": ">=4" + "node_modules/@vueuse/metadata": { + "version": "8.9.4", + "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-8.9.4.tgz", + "integrity": "sha512-IwSfzH80bnJMzqhaapqJl9JRIiyQU0zsRGEgnxN6jhq7992cPUJIRfV+JHRIZXjYqbwt07E1gTEp0R0zPJ1aqw==", + "funding": { + "url": "https://github.com/sponsors/antfu" } }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "node_modules/abab": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", + "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "dev": true + }, + "node_modules/acorn": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", + "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" + "bin": { + "acorn": "bin/acorn" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "node": ">=0.4.0" } }, - "node_modules/check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==", + "node_modules/acorn-globals": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-6.0.0.tgz", + "integrity": "sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==", "dev": true, - "engines": { - "node": "*" + "dependencies": { + "acorn": "^7.1.1", + "acorn-walk": "^7.1.1" } }, - "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "node_modules/acorn-globals/node_modules/acorn": { 
+ "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" + "bin": { + "acorn": "bin/acorn" }, "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" + "node": ">=0.4.0" } }, - "node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "node_modules/acorn-walk": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz", + "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", "dev": true, - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" + "engines": { + "node": ">=0.4.0" } }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", "dev": true, "dependencies": { - "color-name": "~1.1.4" + "debug": "4" }, "engines": { - "node": ">=7.0.0" + "node": ">= 6.0.0" } }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, "dependencies": { - "delayed-stream": "~1.0.0" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, - "engines": { 
- "node": ">= 0.8" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true - }, - "node_modules/convert-source-map": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", - "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, - "dependencies": { - "safe-buffer": "~5.1.1" + "engines": { + "node": ">=8" } }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" + "color-convert": "^2.0.1" }, "engines": { - "node": ">= 8" + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "node_modules/anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", "dev": true, - "bin": { - "cssesc": "bin/cssesc" + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" }, "engines": { - "node": ">=4" + "node": ">= 8" } }, - "node_modules/cssom": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", - "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", "dev": true }, - "node_modules/cssstyle": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", - "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", "dev": true, - "dependencies": { - "cssom": "~0.3.6" - }, "engines": { "node": ">=8" } }, - "node_modules/cssstyle/node_modules/cssom": { - "version": "0.3.8", - "resolved": 
"https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", - "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", - "dev": true - }, - "node_modules/csstype": { - "version": "2.6.20", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.20.tgz", - "integrity": "sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==" - }, - "node_modules/data-urls": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", - "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", "dev": true, - "dependencies": { - "abab": "^2.0.6", - "whatwg-mimetype": "^3.0.0", - "whatwg-url": "^11.0.0" - }, "engines": { - "node": ">=12" + "node": "*" } }, - "node_modules/dayjs": { - "version": "1.11.5", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.5.tgz", - "integrity": "sha512-CAdX5Q3YW3Gclyo5Vpqkgpj8fSdLQcRuzfX6mC6Phy0nfJ0eGYOeS7m4mt2plDWLAtA4TqTakvbboHvUxfe4iA==" + "node_modules/async-validator": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/async-validator/-/async-validator-4.2.5.tgz", + "integrity": "sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==" }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/axios": { + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", + "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "follow-redirects": "^1.14.9", + "form-data": "^4.0.0" } }, - "node_modules/decimal.js": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.0.tgz", - "integrity": "sha512-Nv6ENEzyPQ6AItkGwLE2PGKinZZ9g59vSh2BeH6NqPu0OTKZ5ruJsVqh/orbAnqXc9pBbgXAIrc2EyaCj8NpGg==", + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, - "node_modules/deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", "dev": 
true, - "dependencies": { - "type-detect": "^4.0.0" - }, "engines": { - "node": ">=0.12" + "node": ">=8" } }, - "node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "node_modules/blueimp-md5": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.19.0.tgz", + "integrity": "sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==", "dev": true }, - "node_modules/delayed-stream": { + "node_modules/boolbase": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, "dependencies": { - "path-type": "^4.0.0" + "fill-range": "^7.0.1" }, "engines": { "node": ">=8" } }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "node_modules/browser-process-hrtime": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz", + "integrity": "sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==", + "dev": true + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, - "dependencies": { - "esutils": "^2.0.2" - }, "engines": { - "node": ">=6.0.0" + "node": ">=8" } }, - "node_modules/domexception": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", - "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, - "dependencies": { - "webidl-conversions": "^7.0.0" - }, "engines": { - "node": ">=12" + 
"node": ">=6" } }, - "node_modules/element-plus": { - "version": "2.2.13", - "resolved": "https://registry.npmjs.org/element-plus/-/element-plus-2.2.13.tgz", - "integrity": "sha512-dKQ7BPZC8deUPhv+6s4GgOL0GyGj3KpUarywxm6s1nWnHjH6FqeZlUcxPqBvJd7W/d81POayx3B13GP+rfkG9g==", + "node_modules/chai": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.7.tgz", + "integrity": "sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==", + "dev": true, "dependencies": { - "@ctrl/tinycolor": "^3.4.1", - "@element-plus/icons-vue": "^2.0.6", - "@floating-ui/dom": "^0.5.4", - "@popperjs/core": "npm:@sxzz/popperjs-es@^2.11.7", - "@types/lodash": "^4.14.182", - "@types/lodash-es": "^4.17.6", - "@vueuse/core": "^8.7.5", - "async-validator": "^4.2.5", - "dayjs": "^1.11.3", - "escape-html": "^1.0.3", - "lodash": "^4.17.21", - "lodash-es": "^4.17.21", - "lodash-unified": "^1.0.2", - "memoize-one": "^6.0.0", - "normalize-wheel-es": "^1.2.0" + "assertion-error": "^1.1.0", + "check-error": "^1.0.2", + "deep-eql": "^4.1.2", + "get-func-name": "^2.0.0", + "loupe": "^2.3.1", + "pathval": "^1.1.1", + "type-detect": "^4.0.5" }, - "peerDependencies": { - "vue": "^3.2.0" + "engines": { + "node": ">=4" } }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/entities": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.3.1.tgz", - "integrity": "sha512-o4q/dYJlmyjP2zfnaWDUC6A3BQFmVTX+tZPezK7k0GLSU9QYCauscf5Y+qcEPzKL+EixVouYDgLQK5H9GrLpkg==", + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, "engines": { - "node": ">=0.12" + "node": ">=10" }, "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" + "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/esbuild": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.14.54.tgz", - "integrity": "sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==", - "dev": true, - "hasInstallScript": true, - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/linux-loong64": "0.14.54", - "esbuild-android-64": "0.14.54", - "esbuild-android-arm64": "0.14.54", - "esbuild-darwin-64": "0.14.54", - "esbuild-darwin-arm64": "0.14.54", - "esbuild-freebsd-64": "0.14.54", - "esbuild-freebsd-arm64": "0.14.54", - "esbuild-linux-32": "0.14.54", - "esbuild-linux-64": "0.14.54", - "esbuild-linux-arm": "0.14.54", - "esbuild-linux-arm64": "0.14.54", - "esbuild-linux-mips64le": "0.14.54", - "esbuild-linux-ppc64le": "0.14.54", - "esbuild-linux-riscv64": "0.14.54", - "esbuild-linux-s390x": "0.14.54", - "esbuild-netbsd-64": "0.14.54", - "esbuild-openbsd-64": "0.14.54", - "esbuild-sunos-64": "0.14.54", - "esbuild-windows-32": "0.14.54", - "esbuild-windows-64": "0.14.54", - "esbuild-windows-arm64": "0.14.54" - } - }, - "node_modules/esbuild-android-64": { - "version": "0.14.54", - "resolved": 
"https://registry.npmjs.org/esbuild-android-64/-/esbuild-android-64-0.14.54.tgz", - "integrity": "sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==", - "cpu": [ - "x64" - ], + "node_modules/check-error": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", + "integrity": "sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==", "dev": true, - "optional": true, - "os": [ - "android" - ], "engines": { - "node": ">=12" + "node": "*" } }, - "node_modules/esbuild-android-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-android-arm64/-/esbuild-android-arm64-0.14.54.tgz", - "integrity": "sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, "engines": { - "node": ">=12" + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" } }, - "node_modules/esbuild-darwin-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-darwin-64/-/esbuild-darwin-64-0.14.54.tgz", - "integrity": "sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==", - "cpu": [ - "x64" - ], + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, - "optional": true, - "os": [ - "darwin" - ], + "dependencies": { + "is-glob": "^4.0.1" + }, "engines": { - "node": ">=12" + "node": ">= 6" } }, - "node_modules/esbuild-darwin-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.54.tgz", - "integrity": "sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==", - "cpu": [ - "arm64" - ], + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, - "optional": true, - "os": [ - "darwin" - ], + "dependencies": { + "color-name": "~1.1.4" + }, "engines": { - "node": ">=12" + "node": ">=7.0.0" } }, - "node_modules/esbuild-freebsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.54.tgz", - "integrity": "sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, "engines": { - "node": ">=12" + "node": ">= 0.8" } }, - "node_modules/esbuild-freebsd-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.54.tgz", - "integrity": "sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==", - "cpu": [ - "arm64" - ], + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/concordance": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/concordance/-/concordance-5.0.4.tgz", + "integrity": "sha512-OAcsnTEYu1ARJqWVGwf4zh4JDfHZEaSNlNccFmt8YjB2l/n19/PF2viLINHc57vO4FKIAFl2FWASIGZZWZ2Kxw==", "dev": true, - "optional": true, - "os": [ - "freebsd" - ], + "dependencies": { + "date-time": "^3.1.0", + "esutils": "^2.0.3", + "fast-diff": "^1.2.0", + "js-string-escape": "^1.0.1", + "lodash": "^4.17.15", + "md5-hex": "^3.0.1", + "semver": "^7.3.2", + "well-known-symbols": "^2.0.0" + }, "engines": { - "node": ">=12" + "node": ">=10.18.0 <11 || >=12.14.0 <13 || >=14" } }, - "node_modules/esbuild-linux-32": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-32/-/esbuild-linux-32-0.14.54.tgz", - "integrity": "sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==", - "cpu": [ - "ia32" - ], + "node_modules/convert-source-map": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "dependencies": { + "safe-buffer": "~5.1.1" } }, - "node_modules/esbuild-linux-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-64/-/esbuild-linux-64-0.14.54.tgz", - "integrity": "sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==", - "cpu": [ - "x64" - ], + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, "engines": { - "node": ">=12" + "node": ">= 8" } }, - "node_modules/esbuild-linux-arm": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm/-/esbuild-linux-arm-0.14.54.tgz", - "integrity": "sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==", - "cpu": [ - "arm" - ], + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + 
"integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "bin": { + "cssesc": "bin/cssesc" + }, "engines": { - "node": ">=12" + "node": ">=4" } }, - "node_modules/esbuild-linux-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.54.tgz", - "integrity": "sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==", - "cpu": [ - "arm64" - ], + "node_modules/cssom": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", + "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", + "dev": true + }, + "node_modules/cssstyle": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", + "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "dependencies": { + "cssom": "~0.3.6" + }, "engines": { - "node": ">=12" + "node": ">=8" } }, - "node_modules/esbuild-linux-mips64le": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.54.tgz", - "integrity": "sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==", - "cpu": [ - "mips64el" - ], + "node_modules/cssstyle/node_modules/cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true + }, + "node_modules/csstype": { + "version": "2.6.20", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.20.tgz", + "integrity": "sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==" + }, + "node_modules/data-urls": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", + "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "dependencies": { + "abab": "^2.0.6", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0" + }, "engines": { "node": ">=12" } }, - "node_modules/esbuild-linux-ppc64le": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.54.tgz", - "integrity": "sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], + "node_modules/date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "dependencies": { + "@babel/runtime": "^7.21.0" + }, "engines": { - "node": ">=12" + "node": ">=0.11" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/date-fns" } }, - "node_modules/esbuild-linux-riscv64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.54.tgz", - "integrity": 
"sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==", - "cpu": [ - "riscv64" - ], + "node_modules/date-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/date-time/-/date-time-3.1.0.tgz", + "integrity": "sha512-uqCUKXE5q1PNBXjPqvwhwJf9SwMoAHBgWJ6DcrnS5o+W2JOiIILl0JEdVD8SGujrNS02GGxgwAg2PN2zONgtjg==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "dependencies": { + "time-zone": "^1.0.0" + }, "engines": { - "node": ">=12" + "node": ">=6" } }, - "node_modules/esbuild-linux-s390x": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.54.tgz", - "integrity": "sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==", - "cpu": [ - "s390x" - ], + "node_modules/dayjs": { + "version": "1.11.5", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.5.tgz", + "integrity": "sha512-CAdX5Q3YW3Gclyo5Vpqkgpj8fSdLQcRuzfX6mC6Phy0nfJ0eGYOeS7m4mt2plDWLAtA4TqTakvbboHvUxfe4iA==" + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, - "optional": true, - "os": [ - "linux" - ], + "dependencies": { + "ms": "2.1.2" + }, "engines": { - "node": ">=12" + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, - "node_modules/esbuild-netbsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.54.tgz", - "integrity": "sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "netbsd" - ], + "node_modules/decimal.js": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.0.tgz", + "integrity": "sha512-Nv6ENEzyPQ6AItkGwLE2PGKinZZ9g59vSh2BeH6NqPu0OTKZ5ruJsVqh/orbAnqXc9pBbgXAIrc2EyaCj8NpGg==", + "dev": true + }, + "node_modules/deep-eql": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "dev": true, + "dependencies": { + "type-detect": "^4.0.0" + }, "engines": { - "node": ">=12" + "node": ">=6" } }, - "node_modules/esbuild-openbsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.54.tgz", - "integrity": "sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", "engines": { - "node": ">=12" + "node": ">=0.4.0" } }, - "node_modules/esbuild-sunos-64": { - "version": "0.14.54", - "resolved": 
"https://registry.npmjs.org/esbuild-sunos-64/-/esbuild-sunos-64-0.14.54.tgz", - "integrity": "sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==", - "cpu": [ - "x64" - ], + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dev": true, - "optional": true, - "os": [ - "sunos" - ], + "dependencies": { + "path-type": "^4.0.0" + }, "engines": { - "node": ">=12" + "node": ">=8" } }, - "node_modules/esbuild-windows-32": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-32/-/esbuild-windows-32-0.14.54.tgz", - "integrity": "sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==", - "cpu": [ - "ia32" - ], + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "optional": true, - "os": [ - "win32" - ], + "dependencies": { + "esutils": "^2.0.2" + }, "engines": { - "node": ">=12" + "node": ">=6.0.0" } }, - "node_modules/esbuild-windows-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-64/-/esbuild-windows-64-0.14.54.tgz", - "integrity": "sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==", - "cpu": [ - "x64" - ], + "node_modules/domexception": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", + "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", "dev": true, - "optional": true, - "os": [ - "win32" - ], + "dependencies": { + "webidl-conversions": "^7.0.0" + }, "engines": { "node": ">=12" } }, - "node_modules/esbuild-windows-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.54.tgz", - "integrity": "sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==", - "cpu": [ - "arm64" - ], + "node_modules/element-plus": { + "version": "2.2.13", + "resolved": "https://registry.npmjs.org/element-plus/-/element-plus-2.2.13.tgz", + "integrity": "sha512-dKQ7BPZC8deUPhv+6s4GgOL0GyGj3KpUarywxm6s1nWnHjH6FqeZlUcxPqBvJd7W/d81POayx3B13GP+rfkG9g==", + "dependencies": { + "@ctrl/tinycolor": "^3.4.1", + "@element-plus/icons-vue": "^2.0.6", + "@floating-ui/dom": "^0.5.4", + "@popperjs/core": "npm:@sxzz/popperjs-es@^2.11.7", + "@types/lodash": "^4.14.182", + "@types/lodash-es": "^4.17.6", + "@vueuse/core": "^8.7.5", + "async-validator": "^4.2.5", + "dayjs": "^1.11.3", + "escape-html": "^1.0.3", + "lodash": "^4.17.21", + "lodash-es": "^4.17.21", + "lodash-unified": "^1.0.2", + "memoize-one": "^6.0.0", + "normalize-wheel-es": "^1.2.0" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/entities": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.3.1.tgz", + "integrity": "sha512-o4q/dYJlmyjP2zfnaWDUC6A3BQFmVTX+tZPezK7k0GLSU9QYCauscf5Y+qcEPzKL+EixVouYDgLQK5H9GrLpkg==", "dev": true, - "optional": true, - "os": [ - "win32" - ], "engines": { - "node": ">=12" + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" } }, - 
"node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "node_modules/esbuild": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz", + "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==", "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, "engines": { - "node": ">=6" + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" } }, "node_modules/escape-html": { @@ -2333,19 +2523,6 @@ } } }, - "node_modules/foreground-child": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", - "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, "node_modules/form-data": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -2379,27 +2556,12 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, "node_modules/functional-red-black-tree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", "integrity": "sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==", "dev": true }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, "node_modules/get-func-name": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", @@ -2482,18 +2644,6 @@ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "dependencies": 
{ - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -2628,18 +2778,6 @@ "node": ">=8" } }, - "node_modules/is-core-module": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.10.0.tgz", - "integrity": "sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==", - "dev": true, - "dependencies": { - "has": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -2649,15 +2787,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -2714,6 +2843,20 @@ "node": ">=8" } }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/istanbul-reports": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", @@ -2727,6 +2870,15 @@ "node": ">=8" } }, + "node_modules/js-string-escape": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/js-string-escape/-/js-string-escape-1.0.1.tgz", + "integrity": "sha512-Smw4xcfIQ5LVjAOuJCvN/zIodzA/BBSsluuoSykP+lUvScIi4U6RJLfwHet5cxFnCswUjISV8oAXaqaJDY3chg==", + "dev": true, + "engines": { + "node": ">= 0.8" + } + }, "node_modules/js-yaml": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", @@ -2797,6 +2949,12 @@ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "node_modules/jsonc-parser": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", + "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==", + "dev": true + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -2811,9 +2969,9 @@ } }, "node_modules/local-pkg": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.2.tgz", - "integrity": "sha512-mlERgSPrbxU3BP4qBqAvvwlgW4MTg78iwJdGGnv7kibKjWcJksrG3t6LB5lXI93wXRDvG4NpUgJFmTG4T6rdrg==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.3.tgz", + "integrity": "sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==", "dev": true, "engines": { "node": ">=14" @@ -2864,9 +3022,9 @@ "dev": true }, "node_modules/loupe": { - "version": "2.3.4", - "resolved": 
"https://registry.npmjs.org/loupe/-/loupe-2.3.4.tgz", - "integrity": "sha512-OvKfgCC2Ndby6aSTREl5aCCPTNIzlDfQZvZxNUrBrihDhL3xcrYegTblhmEiCrg2kKQz4XsFIaemE5BF4ybSaQ==", + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.6.tgz", + "integrity": "sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==", "dev": true, "dependencies": { "get-func-name": "^2.0.0" @@ -2884,6 +3042,18 @@ "node": ">=10" } }, + "node_modules/magic-string": { + "version": "0.30.0", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.0.tgz", + "integrity": "sha512-LA+31JYDJLs82r2ScLrlz1GjSgu66ZV518eyWT+S8VhyQn/JL0u9MeBOvQMGYiPk1DBiSN9DDMOcXvigJZaViQ==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.13" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/make-dir": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", @@ -2908,6 +3078,18 @@ "semver": "bin/semver.js" } }, + "node_modules/md5-hex": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/md5-hex/-/md5-hex-3.0.1.tgz", + "integrity": "sha512-BUiRtTtV39LIJwinWBjqVsU9xhdnz7/i889V859IBFpuqGAj6LuOvHv5XLbgZ2R7ptJoJaEcxkv88/h25T7Ciw==", + "dev": true, + "dependencies": { + "blueimp-md5": "^2.10.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/memoize-one": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-6.0.0.tgz", @@ -2966,6 +3148,18 @@ "node": "*" } }, + "node_modules/mlly": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.3.0.tgz", + "integrity": "sha512-HT5mcgIQKkOrZecOjOX3DJorTikWXwsBfpcr/MGBkhfWcjiqvnaL/9ppxvIUXfjT6xt4DVIAsN9fMUz1ev4bIw==", + "dev": true, + "dependencies": { + "acorn": "^8.8.2", + "pathe": "^1.1.0", + "pkg-types": "^1.0.3", + "ufo": "^1.1.2" + } + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -2973,9 +3167,15 @@ "dev": true }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -3128,12 +3328,6 @@ "node": ">=8" } }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true - }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", @@ -3143,6 +3337,12 @@ "node": ">=8" } }, + "node_modules/pathe": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.1.tgz", + "integrity": "sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==", + "dev": true + }, "node_modules/pathval": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", @@ -3232,10 +3432,21 @@ } } }, + "node_modules/pkg-types": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/pkg-types/-/pkg-types-1.0.3.tgz", + "integrity": "sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==", + "dev": true, + "dependencies": { + "jsonc-parser": "^3.2.0", + "mlly": "^1.2.0", + "pathe": "^1.1.0" + } + }, "node_modules/postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", + "version": "8.4.24", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", "funding": [ { "type": "opencollective", @@ -3244,10 +3455,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -3304,6 +3519,32 @@ "node": ">=6.0.0" } }, + "node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", @@ -3319,6 +3560,12 @@ "node": ">=6" } }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -3339,6 +3586,12 @@ } ] }, + "node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true + }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -3351,44 +3604,29 @@ "node": ">=8.10.0" } }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, "node_modules/regexpp": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", - "dev": true, - "engines": { - "node": ">=8" - 
}, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", - "dev": true, - "dependencies": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" + "dev": true, + "engines": { + "node": ">=8" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/mysticatea" } }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -3424,15 +3662,16 @@ } }, "node_modules/rollup": { - "version": "2.77.3", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.77.3.tgz", - "integrity": "sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g==", + "version": "3.24.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.24.0.tgz", + "integrity": "sha512-OgraHOIg2YpHQTjl0/ymWfFNBEyPucB7lmhXrQUh38qNOegxLapSPFs9sNr0qKR75awW41D93XafoR2QfhBdUQ==", "dev": true, "bin": { "rollup": "dist/bin/rollup" }, "engines": { - "node": ">=10.0.0" + "node": ">=14.18.0", + "npm": ">=8.0.0" }, "optionalDependencies": { "fsevents": "~2.3.2" @@ -3538,10 +3777,10 @@ "node": ">=8" } }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", "dev": true }, "node_modules/slash": { @@ -3574,19 +3813,17 @@ "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==" }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true + }, + "node_modules/std-env": { + "version": "3.3.3", 
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz", + "integrity": "sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==", + "dev": true }, "node_modules/strip-ansi": { "version": "6.0.1", @@ -3612,6 +3849,18 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/strip-literal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.0.1.tgz", + "integrity": "sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==", + "dev": true, + "dependencies": { + "acorn": "^8.8.2" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -3624,17 +3873,10 @@ "node": ">=8" } }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } + "node_modules/swagger-ui-dist": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.6.2.tgz", + "integrity": "sha512-2LKVuU2m6RHkemJloKiKJOTpN2RPmbsiad0OfSdtmFHOXJKAgYRZMwJcpT96RX6E9HUB5RkVOFC6vWqVjRgSOg==" }, "node_modules/symbol-tree": { "version": "3.2.4", @@ -3662,19 +3904,34 @@ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, + "node_modules/time-zone": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/time-zone/-/time-zone-1.0.0.tgz", + "integrity": "sha512-TIsDdtKo6+XrPtiTm1ssmMngN1sAhyKnTO2kunQWqNPWIVvCm15Wmw4SWInwTVgJ5u/Tr04+8Ei9TNcw4x4ONA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/tinybench": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.5.0.tgz", + "integrity": "sha512-kRwSG8Zx4tjF9ZiyH4bhaebu+EDz1BOx9hOigYHlUW4xxI/wKIUQUqo018UlU4ar6ATPBsaMrdbKZ+tmPdohFA==", + "dev": true + }, "node_modules/tinypool": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.2.4.tgz", - "integrity": "sha512-Vs3rhkUH6Qq1t5bqtb816oT+HeJTXfwt2cbPH17sWHIYKTotQIFPk3tf2fgqRrVyMDVOc1EnPgzIxfIulXVzwQ==", + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.5.0.tgz", + "integrity": "sha512-paHQtnrlS1QZYKF/GnLoOM/DN9fqaGOFbCbxzAhwniySnzl9Ebk8w73/dd34DAhe/obUbPAOldTyYXQZxnPBPQ==", "dev": true, "engines": { "node": ">=14.0.0" } }, "node_modules/tinyspy": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-1.0.0.tgz", - "integrity": "sha512-FI5B2QdODQYDRjfuLF+OrJ8bjWRMCXokQPcwKm0W3IzcbUmBNv536cQc7eXGoAuXphZwgx1DFbqImwzz08Fnhw==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.1.1.tgz", + "integrity": "sha512-XPJL2uSzcOyBMky6OFrusqWlzfFrXtE0hPuMgW8A2HmaqrPo4ZQHRN/V0QXN3FSjKxpsbRrFc5LI7KOwBsT1/w==", "dev": true, "engines": { "node": ">=14.0.0" @@ -3693,14 +3950,15 @@ } }, "node_modules/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": 
"sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "dev": true, "dependencies": { "psl": "^1.1.33", "punycode": "^2.1.1", - "universalify": "^0.1.2" + "universalify": "^0.2.0", + "url-parse": "^1.5.3" }, "engines": { "node": ">=6" @@ -3785,10 +4043,16 @@ "node": ">=4.2.0" } }, + "node_modules/ufo": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.1.2.tgz", + "integrity": "sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==", + "dev": true + }, "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", "dev": true, "engines": { "node": ">= 4.0.0" @@ -3803,6 +4067,16 @@ "punycode": "^2.1.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -3816,9 +4090,9 @@ "dev": true }, "node_modules/v8-to-istanbul": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.0.1.tgz", - "integrity": "sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==", + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", + "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", "dev": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", @@ -3830,15 +4104,14 @@ } }, "node_modules/vite": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/vite/-/vite-3.0.8.tgz", - "integrity": "sha512-AOZ4eN7mrkJiOLuw8IA7piS4IdOQyQCA81GxGsAQvAZzMRi9ZwGB3TOaYsj4uLAWK46T5L4AfQ6InNGlxX30IQ==", + "version": "4.3.9", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.3.9.tgz", + "integrity": "sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==", "dev": true, "dependencies": { - "esbuild": "^0.14.47", - "postcss": "^8.4.16", - "resolve": "^1.22.1", - "rollup": ">=2.75.6 <2.77.0 || ~2.77.0" + "esbuild": "^0.17.5", + "postcss": "^8.4.23", + "rollup": "^3.21.0" }, "bin": { "vite": "bin/vite.js" @@ -3850,12 +4123,17 @@ "fsevents": "~2.3.2" }, "peerDependencies": { + "@types/node": ">= 14", "less": "*", "sass": "*", "stylus": "*", + "sugarss": "*", "terser": "^5.4.0" }, "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, "less": { "optional": true }, @@ -3865,42 +4143,87 @@ "stylus": { "optional": true }, + "sugarss": { + "optional": true + }, "terser": { "optional": true } } }, + 
"node_modules/vite-node": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-0.32.0.tgz", + "integrity": "sha512-220P/y8YacYAU+daOAqiGEFXx2A8AwjadDzQqos6wSukjvvTWNqleJSwoUn0ckyNdjHIKoxn93Nh1vWBqEKr3Q==", + "dev": true, + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "mlly": "^1.2.0", + "pathe": "^1.1.0", + "picocolors": "^1.0.0", + "vite": "^3.0.0 || ^4.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": ">=v14.18.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, "node_modules/vitest": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.22.0.tgz", - "integrity": "sha512-BSIro/QOHLaQY08FHwT6THWhqLQ+VPU+N4Rdo4pcP+16XB6oLmNNAXGcSh/MOLUhfUy+mqCwx7AyKmU7Ms5R+g==", + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.32.0.tgz", + "integrity": "sha512-SW83o629gCqnV3BqBnTxhB10DAwzwEx3z+rqYZESehUB+eWsJxwcBQx7CKy0otuGMJTYh7qCVuUX23HkftGl/Q==", "dev": true, "dependencies": { - "@types/chai": "^4.3.3", + "@types/chai": "^4.3.5", "@types/chai-subset": "^1.3.3", "@types/node": "*", - "chai": "^4.3.6", + "@vitest/expect": "0.32.0", + "@vitest/runner": "0.32.0", + "@vitest/snapshot": "0.32.0", + "@vitest/spy": "0.32.0", + "@vitest/utils": "0.32.0", + "acorn": "^8.8.2", + "acorn-walk": "^8.2.0", + "cac": "^6.7.14", + "chai": "^4.3.7", + "concordance": "^5.0.4", "debug": "^4.3.4", - "local-pkg": "^0.4.2", - "tinypool": "^0.2.4", - "tinyspy": "^1.0.0", - "vite": "^2.9.12 || ^3.0.0-0" + "local-pkg": "^0.4.3", + "magic-string": "^0.30.0", + "pathe": "^1.1.0", + "picocolors": "^1.0.0", + "std-env": "^3.3.2", + "strip-literal": "^1.0.1", + "tinybench": "^2.5.0", + "tinypool": "^0.5.0", + "vite": "^3.0.0 || ^4.0.0", + "vite-node": "0.32.0", + "why-is-node-running": "^2.2.2" }, "bin": { "vitest": "vitest.mjs" }, "engines": { - "node": ">=v14.16.0" + "node": ">=v14.18.0" }, "funding": { - "url": "https://github.com/sponsors/antfu" + "url": "https://opencollective.com/vitest" }, "peerDependencies": { "@edge-runtime/vm": "*", "@vitest/browser": "*", "@vitest/ui": "*", "happy-dom": "*", - "jsdom": "*" + "jsdom": "*", + "playwright": "*", + "safaridriver": "*", + "webdriverio": "*" }, "peerDependenciesMeta": { "@edge-runtime/vm": { @@ -3917,9 +4240,27 @@ }, "jsdom": { "optional": true + }, + "playwright": { + "optional": true + }, + "safaridriver": { + "optional": true + }, + "webdriverio": { + "optional": true } } }, + "node_modules/vitest/node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/vue": { "version": "3.2.37", "resolved": "https://registry.npmjs.org/vue/-/vue-3.2.37.tgz", @@ -4054,6 +4395,15 @@ "node": ">=12" } }, + "node_modules/well-known-symbols": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/well-known-symbols/-/well-known-symbols-2.0.0.tgz", + "integrity": "sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/whatwg-encoding": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", @@ -4103,30 +4453,29 @@ "node": ">= 8" } }, - "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": 
"https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "node_modules/why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", "dev": true, + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/word-wrap": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "dev": true, - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "node": ">=0.10.0" } }, "node_modules/wrappy": { @@ -4171,48 +4520,12 @@ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", "dev": true }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, - "engines": { - "node": ">=10" - } - }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true }, - "node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true, - "engines": { - "node": ">=10" - } - }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", @@ -4227,11 +4540,29 @@ } }, "dependencies": { + "@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, "@babel/parser": { "version": "7.18.11", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.11.tgz", 
"integrity": "sha512-9JKn5vN+hDt0Hdqn1PiJ2guflwP+B6Ga8qbDuoF0PzzVhrzsKIJo8yGqVk6CmMHiMei9w1C1Bp9IMJSIK+HPIQ==" }, + "@babel/runtime": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.3.tgz", + "integrity": "sha512-XsDuspWKLUsxwCp6r7EhsExHtYfbe5oAGQ19kqngTdCPUoPQzOPdUbD/pB9PJiwb2ptYKQDjSJT3R6dC+EPqfQ==", + "requires": { + "regenerator-runtime": "^0.13.11" + } + }, "@bcoe/v8-coverage": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", @@ -4249,10 +4580,157 @@ "integrity": "sha512-okdrwiVeKBmW41Hkl0eMrXDjzJwhQMuKiBOu17rOszqM+LS/yBYpNQNV5Jvoh06Wc+89fMmb/uhzf8NZuDuUaQ==", "requires": {} }, + "@esbuild/android-arm": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.19.tgz", + "integrity": "sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==", + "dev": true, + "optional": true + }, + "@esbuild/android-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz", + "integrity": "sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==", + "dev": true, + "optional": true + }, + "@esbuild/android-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.19.tgz", + "integrity": "sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==", + "dev": true, + "optional": true + }, + "@esbuild/darwin-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz", + "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==", + "dev": true, + "optional": true + }, + "@esbuild/darwin-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz", + "integrity": "sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==", + "dev": true, + "optional": true + }, + "@esbuild/freebsd-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz", + "integrity": "sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==", + "dev": true, + "optional": true + }, + "@esbuild/freebsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz", + "integrity": "sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==", + "dev": true, + "optional": true + }, + "@esbuild/linux-arm": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz", + "integrity": "sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==", + "dev": true, + "optional": true + }, + "@esbuild/linux-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz", + "integrity": "sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==", + "dev": true, + "optional": true + }, + "@esbuild/linux-ia32": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz", + "integrity": 
"sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==", + "dev": true, + "optional": true + }, "@esbuild/linux-loong64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.14.54.tgz", - "integrity": "sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz", + "integrity": "sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==", + "dev": true, + "optional": true + }, + "@esbuild/linux-mips64el": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz", + "integrity": "sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==", + "dev": true, + "optional": true + }, + "@esbuild/linux-ppc64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz", + "integrity": "sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==", + "dev": true, + "optional": true + }, + "@esbuild/linux-riscv64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz", + "integrity": "sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==", + "dev": true, + "optional": true + }, + "@esbuild/linux-s390x": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz", + "integrity": "sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==", + "dev": true, + "optional": true + }, + "@esbuild/linux-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz", + "integrity": "sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==", + "dev": true, + "optional": true + }, + "@esbuild/netbsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz", + "integrity": "sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==", + "dev": true, + "optional": true + }, + "@esbuild/openbsd-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz", + "integrity": "sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==", + "dev": true, + "optional": true + }, + "@esbuild/sunos-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz", + "integrity": "sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==", + "dev": true, + "optional": true + }, + "@esbuild/win32-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz", + "integrity": "sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==", + "dev": true, + "optional": true + }, + "@esbuild/win32-ia32": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz", + "integrity": 
"sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==", + "dev": true, + "optional": true + }, + "@esbuild/win32-x64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz", + "integrity": "sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==", "dev": true, "optional": true }, @@ -4372,12 +4850,29 @@ "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true }, + "@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "requires": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, "@jridgewell/resolve-uri": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true }, + "@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true + }, "@jridgewell/sourcemap-codec": { "version": "1.4.14", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", @@ -4432,9 +4927,9 @@ "dev": true }, "@types/chai": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.3.tgz", - "integrity": "sha512-hC7OMnszpxhZPduX+m+nrx+uFoLkWOMiR4oa/AZF3MuSETYTZmFfJAHqZEM8MVlvfG7BEUcgvtwoCTxBp6hm3g==", + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.5.tgz", + "integrity": "sha512-mEo1sAde+UCE6b2hxn332f1g1E8WfYRu6p5SvTKr2ZKC1f7gFJXk4h5PyGP9Dt6gCaG8y8XhwnXWC6Iy2cmBng==", "dev": true }, "@types/chai-subset": { @@ -4477,6 +4972,12 @@ "integrity": "sha512-EdxgKRXgYsNITy5mjjXjVE/CS8YENSdhiagGrLqjG0pvA2owgJ6i4l7wy/PFZGC0B1/H20lWKN7ONVDNYDZm7A==", "dev": true }, + "@types/swagger-ui-dist": { + "version": "3.30.1", + "resolved": "https://registry.npmjs.org/@types/swagger-ui-dist/-/swagger-ui-dist-3.30.1.tgz", + "integrity": "sha512-wWojDensMF33dSrhak4iWSPOsWBbvf+rSJ6VWQ7ohQbGdKAiT2IwUexrDZkMvf3+vVAPVnNFDRDtxADFszbh+Q==", + "dev": true + }, "@types/web-bluetooth": { "version": "0.0.14", "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.14.tgz", @@ -4553,45 +5054,125 @@ "tsutils": "^3.21.0" } }, - "@typescript-eslint/utils": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.33.1.tgz", - "integrity": "sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==", + "@typescript-eslint/utils": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.33.1.tgz", + "integrity": "sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.9", + "@typescript-eslint/scope-manager": "5.33.1", + "@typescript-eslint/types": "5.33.1", + "@typescript-eslint/typescript-estree": "5.33.1", + "eslint-scope": 
"^5.1.1", + "eslint-utils": "^3.0.0" + } + }, + "@typescript-eslint/visitor-keys": { + "version": "5.33.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.33.1.tgz", + "integrity": "sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.33.1", + "eslint-visitor-keys": "^3.3.0" + } + }, + "@vitejs/plugin-vue": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-4.2.3.tgz", + "integrity": "sha512-R6JDUfiZbJA9cMiguQ7jxALsgiprjBeHL5ikpXfJCH62pPHtI+JdJ5xWj6Ev73yXSlYl86+blXn1kZHQ7uElxw==", + "dev": true, + "requires": {} + }, + "@vitest/coverage-v8": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-0.32.0.tgz", + "integrity": "sha512-VXXlWq9X/NbsoP/l/CHLBjutsFFww1UY1qEhzGjn/DY7Tqe+z0Nu8XKc8im/XUAmjiWsh2XV7sy/F0IKAl4eaw==", + "dev": true, + "requires": { + "@ampproject/remapping": "^2.2.1", + "@bcoe/v8-coverage": "^0.2.3", + "istanbul-lib-coverage": "^3.2.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.1", + "istanbul-reports": "^3.1.5", + "magic-string": "^0.30.0", + "picocolors": "^1.0.0", + "std-env": "^3.3.2", + "test-exclude": "^6.0.0", + "v8-to-istanbul": "^9.1.0" + } + }, + "@vitest/expect": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-0.32.0.tgz", + "integrity": "sha512-VxVHhIxKw9Lux+O9bwLEEk2gzOUe93xuFHy9SzYWnnoYZFYg1NfBtnfnYWiJN7yooJ7KNElCK5YtA7DTZvtXtg==", + "dev": true, + "requires": { + "@vitest/spy": "0.32.0", + "@vitest/utils": "0.32.0", + "chai": "^4.3.7" + } + }, + "@vitest/runner": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-0.32.0.tgz", + "integrity": "sha512-QpCmRxftHkr72xt5A08xTEs9I4iWEXIOCHWhQQguWOKE4QH7DXSKZSOFibuwEIMAD7G0ERvtUyQn7iPWIqSwmw==", + "dev": true, + "requires": { + "@vitest/utils": "0.32.0", + "concordance": "^5.0.4", + "p-limit": "^4.0.0", + "pathe": "^1.1.0" + }, + "dependencies": { + "p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dev": true, + "requires": { + "yocto-queue": "^1.0.0" + } + }, + "yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "dev": true + } + } + }, + "@vitest/snapshot": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-0.32.0.tgz", + "integrity": "sha512-yCKorPWjEnzpUxQpGlxulujTcSPgkblwGzAUEL+z01FTUg/YuCDZ8dxr9sHA08oO2EwxzHXNLjQKWJ2zc2a19Q==", "dev": true, "requires": { - "@types/json-schema": "^7.0.9", - "@typescript-eslint/scope-manager": "5.33.1", - "@typescript-eslint/types": "5.33.1", - "@typescript-eslint/typescript-estree": "5.33.1", - "eslint-scope": "^5.1.1", - "eslint-utils": "^3.0.0" + "magic-string": "^0.30.0", + "pathe": "^1.1.0", + "pretty-format": "^27.5.1" } }, - "@typescript-eslint/visitor-keys": { - "version": "5.33.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.33.1.tgz", - "integrity": "sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==", + "@vitest/spy": { + 
"version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-0.32.0.tgz", + "integrity": "sha512-MruAPlM0uyiq3d53BkwTeShXY0rYEfhNGQzVO5GHBmmX3clsxcWp79mMnkOVcV244sNTeDcHbcPFWIjOI4tZvw==", "dev": true, "requires": { - "@typescript-eslint/types": "5.33.1", - "eslint-visitor-keys": "^3.3.0" + "tinyspy": "^2.1.0" } }, - "@vitejs/plugin-vue": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-3.0.3.tgz", - "integrity": "sha512-U4zNBlz9mg+TA+i+5QPc3N5lQvdUXENZLO2h0Wdzp56gI1MWhqJOv+6R+d4kOzoaSSq6TnGPBdZAXKOe4lXy6g==", - "dev": true, - "requires": {} - }, - "@vitest/coverage-c8": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/@vitest/coverage-c8/-/coverage-c8-0.22.0.tgz", - "integrity": "sha512-jwW6b8U+h9nbzQfKoRmpf2xjDg+mcAjLIdVUrZGhjTnIdekGfvoqFoeiXzsLv2HwYBeFi4943lYUftuj8qD1FQ==", + "@vitest/utils": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-0.32.0.tgz", + "integrity": "sha512-53yXunzx47MmbuvcOPpLaVljHaeSu1G2dHdmy7+9ngMnQIkBQcvwOcoclWFnxDMxFbnq8exAfh3aKSZaK71J5A==", "dev": true, "requires": { - "c8": "^7.12.0", - "vitest": "0.22.0" + "concordance": "^5.0.4", + "loupe": "^2.3.6", + "pretty-format": "^27.5.1" } }, "@volar/code-gen": { @@ -4826,9 +5407,9 @@ "dev": true }, "acorn": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.0.tgz", - "integrity": "sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==", + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", + "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", "dev": true }, "acorn-globals": { @@ -4957,6 +5538,12 @@ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", "dev": true }, + "blueimp-md5": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.19.0.tgz", + "integrity": "sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==", + "dev": true + }, "boolbase": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", @@ -4988,25 +5575,11 @@ "integrity": "sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==", "dev": true }, - "c8": { - "version": "7.12.0", - "resolved": "https://registry.npmjs.org/c8/-/c8-7.12.0.tgz", - "integrity": "sha512-CtgQrHOkyxr5koX1wEUmN/5cfDa2ckbHRA4Gy5LAL0zaCFtVWJS5++n+w4/sr2GWGerBxgTjpKeDclk/Qk6W/A==", - "dev": true, - "requires": { - "@bcoe/v8-coverage": "^0.2.3", - "@istanbuljs/schema": "^0.1.3", - "find-up": "^5.0.0", - "foreground-child": "^2.0.0", - "istanbul-lib-coverage": "^3.2.0", - "istanbul-lib-report": "^3.0.0", - "istanbul-reports": "^3.1.4", - "rimraf": "^3.0.2", - "test-exclude": "^6.0.0", - "v8-to-istanbul": "^9.0.0", - "yargs": "^16.2.0", - "yargs-parser": "^20.2.9" - } + "cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true }, "callsites": { "version": "3.1.0", @@ -5015,14 +5588,14 @@ "dev": true }, "chai": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.6.tgz", - "integrity": "sha512-bbcp3YfHCUzMOvKqsztczerVgBKSsEijCySNlHHbX3VG1nskvqjz5Rfso1gGwD6w6oOV3eI60pKuMOV5MV7p3Q==", + 
"version": "4.3.7", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.7.tgz", + "integrity": "sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==", "dev": true, "requires": { "assertion-error": "^1.1.0", "check-error": "^1.0.2", - "deep-eql": "^3.0.1", + "deep-eql": "^4.1.2", "get-func-name": "^2.0.0", "loupe": "^2.3.1", "pathval": "^1.1.1", @@ -5072,17 +5645,6 @@ } } }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dev": true, - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, "color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -5112,6 +5674,22 @@ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true }, + "concordance": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/concordance/-/concordance-5.0.4.tgz", + "integrity": "sha512-OAcsnTEYu1ARJqWVGwf4zh4JDfHZEaSNlNccFmt8YjB2l/n19/PF2viLINHc57vO4FKIAFl2FWASIGZZWZ2Kxw==", + "dev": true, + "requires": { + "date-time": "^3.1.0", + "esutils": "^2.0.3", + "fast-diff": "^1.2.0", + "js-string-escape": "^1.0.1", + "lodash": "^4.17.15", + "md5-hex": "^3.0.1", + "semver": "^7.3.2", + "well-known-symbols": "^2.0.0" + } + }, "convert-source-map": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", @@ -5177,6 +5755,23 @@ "whatwg-url": "^11.0.0" } }, + "date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "requires": { + "@babel/runtime": "^7.21.0" + } + }, + "date-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/date-time/-/date-time-3.1.0.tgz", + "integrity": "sha512-uqCUKXE5q1PNBXjPqvwhwJf9SwMoAHBgWJ6DcrnS5o+W2JOiIILl0JEdVD8SGujrNS02GGxgwAg2PN2zONgtjg==", + "dev": true, + "requires": { + "time-zone": "^1.0.0" + } + }, "dayjs": { "version": "1.11.5", "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.5.tgz", @@ -5198,9 +5793,9 @@ "dev": true }, "deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", "dev": true, "requires": { "type-detect": "^4.0.0" @@ -5266,12 +5861,6 @@ "normalize-wheel-es": "^1.2.0" } }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, "entities": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/entities/-/entities-4.3.1.tgz", @@ -5279,179 +5868,34 @@ "dev": true }, "esbuild": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.14.54.tgz", - "integrity": 
"sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==", - "dev": true, - "requires": { - "@esbuild/linux-loong64": "0.14.54", - "esbuild-android-64": "0.14.54", - "esbuild-android-arm64": "0.14.54", - "esbuild-darwin-64": "0.14.54", - "esbuild-darwin-arm64": "0.14.54", - "esbuild-freebsd-64": "0.14.54", - "esbuild-freebsd-arm64": "0.14.54", - "esbuild-linux-32": "0.14.54", - "esbuild-linux-64": "0.14.54", - "esbuild-linux-arm": "0.14.54", - "esbuild-linux-arm64": "0.14.54", - "esbuild-linux-mips64le": "0.14.54", - "esbuild-linux-ppc64le": "0.14.54", - "esbuild-linux-riscv64": "0.14.54", - "esbuild-linux-s390x": "0.14.54", - "esbuild-netbsd-64": "0.14.54", - "esbuild-openbsd-64": "0.14.54", - "esbuild-sunos-64": "0.14.54", - "esbuild-windows-32": "0.14.54", - "esbuild-windows-64": "0.14.54", - "esbuild-windows-arm64": "0.14.54" - } - }, - "esbuild-android-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-android-64/-/esbuild-android-64-0.14.54.tgz", - "integrity": "sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==", - "dev": true, - "optional": true - }, - "esbuild-android-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-android-arm64/-/esbuild-android-arm64-0.14.54.tgz", - "integrity": "sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==", - "dev": true, - "optional": true - }, - "esbuild-darwin-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-darwin-64/-/esbuild-darwin-64-0.14.54.tgz", - "integrity": "sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==", - "dev": true, - "optional": true - }, - "esbuild-darwin-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.54.tgz", - "integrity": "sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==", - "dev": true, - "optional": true - }, - "esbuild-freebsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.54.tgz", - "integrity": "sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==", - "dev": true, - "optional": true - }, - "esbuild-freebsd-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.54.tgz", - "integrity": "sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==", - "dev": true, - "optional": true - }, - "esbuild-linux-32": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-32/-/esbuild-linux-32-0.14.54.tgz", - "integrity": "sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==", - "dev": true, - "optional": true - }, - "esbuild-linux-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-64/-/esbuild-linux-64-0.14.54.tgz", - "integrity": "sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==", - "dev": true, - "optional": true - }, - "esbuild-linux-arm": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm/-/esbuild-linux-arm-0.14.54.tgz", - "integrity": "sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==", - 
"dev": true, - "optional": true - }, - "esbuild-linux-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.54.tgz", - "integrity": "sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==", - "dev": true, - "optional": true - }, - "esbuild-linux-mips64le": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.54.tgz", - "integrity": "sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==", - "dev": true, - "optional": true - }, - "esbuild-linux-ppc64le": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.54.tgz", - "integrity": "sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==", - "dev": true, - "optional": true - }, - "esbuild-linux-riscv64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.54.tgz", - "integrity": "sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==", - "dev": true, - "optional": true - }, - "esbuild-linux-s390x": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.54.tgz", - "integrity": "sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==", - "dev": true, - "optional": true - }, - "esbuild-netbsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.54.tgz", - "integrity": "sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==", - "dev": true, - "optional": true - }, - "esbuild-openbsd-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.54.tgz", - "integrity": "sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==", - "dev": true, - "optional": true - }, - "esbuild-sunos-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-sunos-64/-/esbuild-sunos-64-0.14.54.tgz", - "integrity": "sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==", - "dev": true, - "optional": true - }, - "esbuild-windows-32": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-32/-/esbuild-windows-32-0.14.54.tgz", - "integrity": "sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==", - "dev": true, - "optional": true - }, - "esbuild-windows-64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-64/-/esbuild-windows-64-0.14.54.tgz", - "integrity": "sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==", - "dev": true, - "optional": true - }, - "esbuild-windows-arm64": { - "version": "0.14.54", - "resolved": "https://registry.npmjs.org/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.54.tgz", - "integrity": "sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==", - "dev": true, - "optional": true - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": 
"sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz", + "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==", + "dev": true, + "requires": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" + } }, "escape-html": { "version": "1.0.3", @@ -5827,16 +6271,6 @@ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==" }, - "foreground-child": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", - "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", - "dev": true, - "requires": { - "cross-spawn": "^7.0.0", - "signal-exit": "^3.0.2" - } - }, "form-data": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -5860,24 +6294,12 @@ "dev": true, "optional": true }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, "functional-red-black-tree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", "integrity": "sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==", "dev": true }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, "get-func-name": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", @@ -5936,15 +6358,6 @@ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, "has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -6049,27 +6462,12 @@ "binary-extensions": "^2.0.0" } }, - "is-core-module": { - "version": "2.10.0", - "resolved": 
"https://registry.npmjs.org/is-core-module/-/is-core-module-2.10.0.tgz", - "integrity": "sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, "is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -6114,6 +6512,17 @@ "supports-color": "^7.1.0" } }, + "istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + } + }, "istanbul-reports": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", @@ -6124,6 +6533,12 @@ "istanbul-lib-report": "^3.0.0" } }, + "js-string-escape": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/js-string-escape/-/js-string-escape-1.0.1.tgz", + "integrity": "sha512-Smw4xcfIQ5LVjAOuJCvN/zIodzA/BBSsluuoSykP+lUvScIi4U6RJLfwHet5cxFnCswUjISV8oAXaqaJDY3chg==", + "dev": true + }, "js-yaml": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", @@ -6180,6 +6595,12 @@ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "jsonc-parser": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", + "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==", + "dev": true + }, "levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -6191,9 +6612,9 @@ } }, "local-pkg": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.2.tgz", - "integrity": "sha512-mlERgSPrbxU3BP4qBqAvvwlgW4MTg78iwJdGGnv7kibKjWcJksrG3t6LB5lXI93wXRDvG4NpUgJFmTG4T6rdrg==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.3.tgz", + "integrity": "sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==", "dev": true }, "locate-path": { @@ -6228,9 +6649,9 @@ "dev": true }, "loupe": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.4.tgz", - "integrity": "sha512-OvKfgCC2Ndby6aSTREl5aCCPTNIzlDfQZvZxNUrBrihDhL3xcrYegTblhmEiCrg2kKQz4XsFIaemE5BF4ybSaQ==", + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.6.tgz", + "integrity": "sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==", "dev": true, "requires": { "get-func-name": "^2.0.0" @@ -6245,6 +6666,15 @@ "yallist": "^4.0.0" } }, + "magic-string": { + "version": "0.30.0", + "resolved": 
"https://registry.npmjs.org/magic-string/-/magic-string-0.30.0.tgz", + "integrity": "sha512-LA+31JYDJLs82r2ScLrlz1GjSgu66ZV518eyWT+S8VhyQn/JL0u9MeBOvQMGYiPk1DBiSN9DDMOcXvigJZaViQ==", + "dev": true, + "requires": { + "@jridgewell/sourcemap-codec": "^1.4.13" + } + }, "make-dir": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", @@ -6262,6 +6692,15 @@ } } }, + "md5-hex": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/md5-hex/-/md5-hex-3.0.1.tgz", + "integrity": "sha512-BUiRtTtV39LIJwinWBjqVsU9xhdnz7/i889V859IBFpuqGAj6LuOvHv5XLbgZ2R7ptJoJaEcxkv88/h25T7Ciw==", + "dev": true, + "requires": { + "blueimp-md5": "^2.10.0" + } + }, "memoize-one": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-6.0.0.tgz", @@ -6305,6 +6744,18 @@ "brace-expansion": "^1.1.7" } }, + "mlly": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.3.0.tgz", + "integrity": "sha512-HT5mcgIQKkOrZecOjOX3DJorTikWXwsBfpcr/MGBkhfWcjiqvnaL/9ppxvIUXfjT6xt4DVIAsN9fMUz1ev4bIw==", + "dev": true, + "requires": { + "acorn": "^8.8.2", + "pathe": "^1.1.0", + "pkg-types": "^1.0.3", + "ufo": "^1.1.2" + } + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -6312,9 +6763,9 @@ "dev": true }, "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==" + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" }, "natural-compare": { "version": "1.4.0", @@ -6425,18 +6876,18 @@ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true - }, "path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": true }, + "pathe": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.1.tgz", + "integrity": "sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==", + "dev": true + }, "pathval": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", @@ -6477,12 +6928,23 @@ "integrity": "sha512-HUgsU5IRtM75eAQiIqzT3p1oPEuYH1/B2ipTMU++yE+FV0LkHaBswdKXs0RMWYCmugO8s62oxLTh/N1dLNp+5A==", "requires": {} }, + "pkg-types": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.0.3.tgz", + "integrity": "sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==", + "dev": true, + "requires": { + "jsonc-parser": "^3.2.0", + "mlly": "^1.2.0", + "pathe": "^1.1.0" + } + }, "postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", + "version": "8.4.24", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } @@ -6518,6 +6980,25 @@ "fast-diff": "^1.1.2" } }, + "pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "dependencies": { + "ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true + } + } + }, "psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", @@ -6530,12 +7011,24 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", "dev": true }, + "querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, "queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true }, + "react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true + }, "readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -6545,29 +7038,23 @@ "picomatch": "^2.2.1" } }, + "regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, "regexpp": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", "dev": true }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", "dev": true }, - "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", - "dev": true, - "requires": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - } - }, "resolve-from": { "version": "4.0.0", 
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -6590,9 +7077,9 @@ } }, "rollup": { - "version": "2.77.3", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.77.3.tgz", - "integrity": "sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g==", + "version": "3.24.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.24.0.tgz", + "integrity": "sha512-OgraHOIg2YpHQTjl0/ymWfFNBEyPucB7lmhXrQUh38qNOegxLapSPFs9sNr0qKR75awW41D93XafoR2QfhBdUQ==", "dev": true, "requires": { "fsevents": "~2.3.2" @@ -6663,10 +7150,10 @@ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true }, - "signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", "dev": true }, "slash": { @@ -6690,16 +7177,17 @@ "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz", "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==" }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } + "stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true + }, + "std-env": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz", + "integrity": "sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==", + "dev": true }, "strip-ansi": { "version": "6.0.1", @@ -6716,6 +7204,15 @@ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true }, + "strip-literal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.0.1.tgz", + "integrity": "sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==", + "dev": true, + "requires": { + "acorn": "^8.8.2" + } + }, "supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -6725,11 +7222,10 @@ "has-flag": "^4.0.0" } }, - "supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true + "swagger-ui-dist": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.6.2.tgz", + "integrity": "sha512-2LKVuU2m6RHkemJloKiKJOTpN2RPmbsiad0OfSdtmFHOXJKAgYRZMwJcpT96RX6E9HUB5RkVOFC6vWqVjRgSOg==" }, "symbol-tree": { "version": "3.2.4", @@ 
-6754,16 +7250,28 @@ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, + "time-zone": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/time-zone/-/time-zone-1.0.0.tgz", + "integrity": "sha512-TIsDdtKo6+XrPtiTm1ssmMngN1sAhyKnTO2kunQWqNPWIVvCm15Wmw4SWInwTVgJ5u/Tr04+8Ei9TNcw4x4ONA==", + "dev": true + }, + "tinybench": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.5.0.tgz", + "integrity": "sha512-kRwSG8Zx4tjF9ZiyH4bhaebu+EDz1BOx9hOigYHlUW4xxI/wKIUQUqo018UlU4ar6ATPBsaMrdbKZ+tmPdohFA==", + "dev": true + }, "tinypool": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.2.4.tgz", - "integrity": "sha512-Vs3rhkUH6Qq1t5bqtb816oT+HeJTXfwt2cbPH17sWHIYKTotQIFPk3tf2fgqRrVyMDVOc1EnPgzIxfIulXVzwQ==", + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.5.0.tgz", + "integrity": "sha512-paHQtnrlS1QZYKF/GnLoOM/DN9fqaGOFbCbxzAhwniySnzl9Ebk8w73/dd34DAhe/obUbPAOldTyYXQZxnPBPQ==", "dev": true }, "tinyspy": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-1.0.0.tgz", - "integrity": "sha512-FI5B2QdODQYDRjfuLF+OrJ8bjWRMCXokQPcwKm0W3IzcbUmBNv536cQc7eXGoAuXphZwgx1DFbqImwzz08Fnhw==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.1.1.tgz", + "integrity": "sha512-XPJL2uSzcOyBMky6OFrusqWlzfFrXtE0hPuMgW8A2HmaqrPo4ZQHRN/V0QXN3FSjKxpsbRrFc5LI7KOwBsT1/w==", "dev": true }, "to-regex-range": { @@ -6776,14 +7284,15 @@ } }, "tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "dev": true, "requires": { "psl": "^1.1.33", "punycode": "^2.1.1", - "universalify": "^0.1.2" + "universalify": "^0.2.0", + "url-parse": "^1.5.3" } }, "tr46": { @@ -6837,10 +7346,16 @@ "integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==", "devOptional": true }, + "ufo": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.1.2.tgz", + "integrity": "sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==", + "dev": true + }, "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", "dev": true }, "uri-js": { @@ -6852,6 +7367,16 @@ "punycode": "^2.1.0" } }, + "url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "requires": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "util-deprecate": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -6865,9 +7390,9 @@ "dev": true }, "v8-to-istanbul": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.0.1.tgz", - "integrity": "sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==", + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", + "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", "dev": true, "requires": { "@jridgewell/trace-mapping": "^0.3.12", @@ -6876,33 +7401,70 @@ } }, "vite": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/vite/-/vite-3.0.8.tgz", - "integrity": "sha512-AOZ4eN7mrkJiOLuw8IA7piS4IdOQyQCA81GxGsAQvAZzMRi9ZwGB3TOaYsj4uLAWK46T5L4AfQ6InNGlxX30IQ==", + "version": "4.3.9", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.3.9.tgz", + "integrity": "sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==", "dev": true, "requires": { - "esbuild": "^0.14.47", + "esbuild": "^0.17.5", "fsevents": "~2.3.2", - "postcss": "^8.4.16", - "resolve": "^1.22.1", - "rollup": ">=2.75.6 <2.77.0 || ~2.77.0" + "postcss": "^8.4.23", + "rollup": "^3.21.0" + } + }, + "vite-node": { + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-0.32.0.tgz", + "integrity": "sha512-220P/y8YacYAU+daOAqiGEFXx2A8AwjadDzQqos6wSukjvvTWNqleJSwoUn0ckyNdjHIKoxn93Nh1vWBqEKr3Q==", + "dev": true, + "requires": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "mlly": "^1.2.0", + "pathe": "^1.1.0", + "picocolors": "^1.0.0", + "vite": "^3.0.0 || ^4.0.0" } }, "vitest": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.22.0.tgz", - "integrity": "sha512-BSIro/QOHLaQY08FHwT6THWhqLQ+VPU+N4Rdo4pcP+16XB6oLmNNAXGcSh/MOLUhfUy+mqCwx7AyKmU7Ms5R+g==", + "version": "0.32.0", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.32.0.tgz", + "integrity": "sha512-SW83o629gCqnV3BqBnTxhB10DAwzwEx3z+rqYZESehUB+eWsJxwcBQx7CKy0otuGMJTYh7qCVuUX23HkftGl/Q==", "dev": true, "requires": { - "@types/chai": "^4.3.3", + "@types/chai": "^4.3.5", "@types/chai-subset": "^1.3.3", "@types/node": "*", - "chai": "^4.3.6", + "@vitest/expect": "0.32.0", + "@vitest/runner": "0.32.0", + "@vitest/snapshot": "0.32.0", + "@vitest/spy": "0.32.0", + "@vitest/utils": "0.32.0", + "acorn": "^8.8.2", + "acorn-walk": "^8.2.0", + "cac": "^6.7.14", + "chai": "^4.3.7", + "concordance": "^5.0.4", "debug": "^4.3.4", - "local-pkg": "^0.4.2", - "tinypool": "^0.2.4", - "tinyspy": "^1.0.0", - "vite": "^2.9.12 || ^3.0.0-0" + "local-pkg": "^0.4.3", + "magic-string": "^0.30.0", + "pathe": "^1.1.0", + "picocolors": "^1.0.0", + "std-env": "^3.3.2", + "strip-literal": "^1.0.1", + "tinybench": "^2.5.0", + "tinypool": "^0.5.0", + "vite": "^3.0.0 || ^4.0.0", + "vite-node": "0.32.0", + "why-is-node-running": "^2.2.2" + }, + "dependencies": { + "acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true + } } }, "vue": { @@ -7002,6 +7564,12 @@ "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", "dev": true }, + "well-known-symbols": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/well-known-symbols/-/well-known-symbols-2.0.0.tgz", + "integrity": "sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q==", + "dev": true + }, "whatwg-encoding": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", @@ -7036,23 +7604,22 @@ "isexe": "^2.0.0" } }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", "dev": true, "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "siginfo": "^2.0.0", + "stackback": "0.0.2" } }, + "word-wrap": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", + "dev": true + }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -7078,39 +7645,12 @@ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", "dev": true }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true - }, "yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true }, - "yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true - }, "yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/kyuubi-server/web-ui/package.json b/kyuubi-server/web-ui/package.json index 63fdc72216c..f31b836dcb8 100644 --- a/kyuubi-server/web-ui/package.json +++ b/kyuubi-server/web-ui/package.json @@ -1,7 +1,7 @@ { "name": "kyuubi-ui", "private": true, - "version": "1.8.0-SNAPSHOT", + "version": "1.9.0-SNAPSHOT", "type": "module", "scripts": { "dev": "vue-tsc --noEmit && vite --port 9090", @@ -11,15 +11,17 @@ "lint": "eslint --ext .ts,vue --ignore-path .gitignore .", "lint-fix": 
"eslint --fix --ext .ts,vue --ignore-path .gitignore .", "prettier": "prettier --write \"src/**/*.{vue,ts,tsx}\"", - "test": "vitest", + "test": "vitest --mode development", "coverage": "vitest run --coverage" }, "dependencies": { "@element-plus/icons-vue": "^2.0.9", "axios": "^0.27.2", + "date-fns": "^2.29.3", "element-plus": "^2.2.12", "pinia": "^2.0.18", "pinia-plugin-persistedstate": "^2.1.1", + "swagger-ui-dist": "^4.9.1", "vue": "^3.2.37", "vue-i18n": "^9.2.2", "vue-router": "^4.1.3" @@ -27,10 +29,11 @@ "devDependencies": { "@iconify-json/ep": "^1.1.6", "@types/node": "^18.7.1", + "@types/swagger-ui-dist": "^3.30.1", "@typescript-eslint/eslint-plugin": "^5.33.0", "@typescript-eslint/parser": "^5.33.0", - "@vitejs/plugin-vue": "^3.0.0", - "@vitest/coverage-c8": "^0.22.0", + "@vitejs/plugin-vue": "^4.2.3", + "@vitest/coverage-v8": "^0.32.0", "@vue/eslint-config-prettier": "^7.0.0", "@vue/eslint-config-typescript": "^11.0.0", "@vue/test-utils": "^2.0.2", @@ -41,8 +44,8 @@ "prettier": "^2.7.1", "sass": "^1.54.4", "typescript": "^4.6.4", - "vite": "^3.0.0", - "vitest": "^0.22.0", + "vite": "^4.2.3", + "vitest": "^0.32.0", "vue-tsc": "^0.38.4" } } diff --git a/kyuubi-server/web-ui/pnpm-lock.yaml b/kyuubi-server/web-ui/pnpm-lock.yaml index 61fc5124dbe..83754291beb 100644 --- a/kyuubi-server/web-ui/pnpm-lock.yaml +++ b/kyuubi-server/web-ui/pnpm-lock.yaml @@ -1,83 +1,129 @@ -lockfileVersion: 5.4 - -specifiers: - '@element-plus/icons-vue': ^2.0.9 - '@iconify-json/ep': ^1.1.6 - '@types/node': ^18.7.1 - '@typescript-eslint/eslint-plugin': ^5.33.0 - '@typescript-eslint/parser': ^5.33.0 - '@vitejs/plugin-vue': ^3.0.0 - '@vitest/coverage-c8': ^0.22.0 - '@vue/eslint-config-prettier': ^7.0.0 - '@vue/eslint-config-typescript': ^11.0.0 - '@vue/test-utils': ^2.0.2 - axios: ^0.27.2 - element-plus: ^2.2.12 - eslint: ^8.21.0 - eslint-plugin-prettier: ^4.2.1 - eslint-plugin-vue: ^9.3.0 - jsdom: ^20.0.0 - pinia: ^2.0.18 - pinia-plugin-persistedstate: ^2.1.1 - prettier: ^2.7.1 - sass: ^1.54.4 - typescript: ^4.6.4 - vite: ^3.0.0 - vitest: ^0.22.0 - vue: ^3.2.37 - vue-i18n: ^9.2.2 - vue-router: ^4.1.3 - vue-tsc: ^0.38.4 +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false dependencies: - '@element-plus/icons-vue': 2.0.9_vue@3.2.37 - axios: 0.27.2 - element-plus: 2.2.13_vue@3.2.37 - pinia: 2.0.18_j6bzmzd4ujpabbp5objtwxyjp4 - pinia-plugin-persistedstate: 2.1.1_pinia@2.0.18 - vue: 3.2.37 - vue-i18n: 9.2.2_vue@3.2.37 - vue-router: 4.1.3_vue@3.2.37 + '@element-plus/icons-vue': + specifier: ^2.0.9 + version: 2.0.9(vue@3.2.37) + axios: + specifier: ^0.27.2 + version: 0.27.2 + date-fns: + specifier: ^2.29.3 + version: 2.29.3 + element-plus: + specifier: ^2.2.12 + version: 2.2.13(vue@3.2.37) + pinia: + specifier: ^2.0.18 + version: 2.0.18(typescript@4.7.4)(vue@3.2.37) + pinia-plugin-persistedstate: + specifier: ^2.1.1 + version: 2.1.1(pinia@2.0.18) + swagger-ui-dist: + specifier: ^4.9.1 + version: 4.19.1 + vue: + specifier: ^3.2.37 + version: 3.2.37 + vue-i18n: + specifier: ^9.2.2 + version: 9.2.2(vue@3.2.37) + vue-router: + specifier: ^4.1.3 + version: 4.1.3(vue@3.2.37) devDependencies: - '@iconify-json/ep': 1.1.7 - '@types/node': 18.7.6 - '@typescript-eslint/eslint-plugin': 5.33.1_vsoshirnpb7xw6mr7xomgfas2i - '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq - '@vitejs/plugin-vue': 3.0.3_vite@3.0.8+vue@3.2.37 - '@vitest/coverage-c8': 0.22.0_jsdom@20.0.0+sass@1.54.4 - '@vue/eslint-config-prettier': 7.0.0_2xd4q2tc5cqa5as7uugqhp6oue - 
'@vue/eslint-config-typescript': 11.0.0_4py5zxx5ck6utobkmozwvrmyiy - '@vue/test-utils': 2.0.2_vue@3.2.37 - eslint: 8.22.0 - eslint-plugin-prettier: 4.2.1_2xd4q2tc5cqa5as7uugqhp6oue - eslint-plugin-vue: 9.3.0_eslint@8.22.0 - jsdom: 20.0.0 - prettier: 2.7.1 - sass: 1.54.4 - typescript: 4.7.4 - vite: 3.0.8_sass@1.54.4 - vitest: 0.22.0_jsdom@20.0.0+sass@1.54.4 - vue-tsc: 0.38.9_typescript@4.7.4 + '@iconify-json/ep': + specifier: ^1.1.6 + version: 1.1.7 + '@types/node': + specifier: ^18.7.1 + version: 18.7.6 + '@types/swagger-ui-dist': + specifier: ^3.30.1 + version: 3.30.1 + '@typescript-eslint/eslint-plugin': + specifier: ^5.33.0 + version: 5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4) + '@typescript-eslint/parser': + specifier: ^5.33.0 + version: 5.33.1(eslint@8.22.0)(typescript@4.7.4) + '@vitejs/plugin-vue': + specifier: ^4.2.3 + version: 4.2.3(vite@4.2.3)(vue@3.2.37) + '@vitest/coverage-v8': + specifier: ^0.32.0 + version: 0.32.0(vitest@0.32.0) + '@vue/eslint-config-prettier': + specifier: ^7.0.0 + version: 7.0.0(eslint@8.22.0)(prettier@2.7.1) + '@vue/eslint-config-typescript': + specifier: ^11.0.0 + version: 11.0.0(eslint-plugin-vue@9.3.0)(eslint@8.22.0)(typescript@4.7.4) + '@vue/test-utils': + specifier: ^2.0.2 + version: 2.0.2(vue@3.2.37) + eslint: + specifier: ^8.21.0 + version: 8.22.0 + eslint-plugin-prettier: + specifier: ^4.2.1 + version: 4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1) + eslint-plugin-vue: + specifier: ^9.3.0 + version: 9.3.0(eslint@8.22.0) + jsdom: + specifier: ^20.0.0 + version: 20.0.0 + prettier: + specifier: ^2.7.1 + version: 2.7.1 + sass: + specifier: ^1.54.4 + version: 1.54.4 + typescript: + specifier: ^4.6.4 + version: 4.7.4 + vite: + specifier: ^4.2.3 + version: 4.2.3(@types/node@18.7.6)(sass@1.54.4) + vitest: + specifier: ^0.32.0 + version: 0.32.0(jsdom@20.0.0)(sass@1.54.4) + vue-tsc: + specifier: ^0.38.4 + version: 0.38.9(typescript@4.7.4) packages: - /@babel/helper-string-parser/7.18.10: + /@ampproject/remapping@2.2.1: + resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.15 + dev: true + + /@babel/helper-string-parser@7.18.10: resolution: {integrity: sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==} engines: {node: '>=6.9.0'} - /@babel/helper-validator-identifier/7.18.6: + /@babel/helper-validator-identifier@7.18.6: resolution: {integrity: sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==} engines: {node: '>=6.9.0'} - /@babel/parser/7.18.11: + /@babel/parser@7.18.11: resolution: {integrity: sha512-9JKn5vN+hDt0Hdqn1PiJ2guflwP+B6Ga8qbDuoF0PzzVhrzsKIJo8yGqVk6CmMHiMei9w1C1Bp9IMJSIK+HPIQ==} engines: {node: '>=6.0.0'} hasBin: true dependencies: '@babel/types': 7.18.10 - /@babel/types/7.18.10: + /@babel/types@7.18.10: resolution: {integrity: sha512-MJvnbEiiNkpjo+LknnmRrqbY1GPUUggjv+wQVjetM/AONoupqRALB7I6jGqNUAZsKcRIEu2J6FRFvsczljjsaQ==} engines: {node: '>=6.9.0'} dependencies: @@ -85,16 +131,16 @@ packages: '@babel/helper-validator-identifier': 7.18.6 to-fast-properties: 2.0.0 - /@bcoe/v8-coverage/0.2.3: + /@bcoe/v8-coverage@0.2.3: resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} dev: true - /@ctrl/tinycolor/3.4.1: + /@ctrl/tinycolor@3.4.1: resolution: {integrity: 
sha512-ej5oVy6lykXsvieQtqZxCOaLT+xD4+QNarq78cIYISHmZXshCvROLudpQN3lfL8G0NL7plMSSK+zlyvCaIJ4Iw==} engines: {node: '>=10'} dev: false - /@element-plus/icons-vue/2.0.9_vue@3.2.37: + /@element-plus/icons-vue@2.0.9(vue@3.2.37): resolution: {integrity: sha512-okdrwiVeKBmW41Hkl0eMrXDjzJwhQMuKiBOu17rOszqM+LS/yBYpNQNV5Jvoh06Wc+89fMmb/uhzf8NZuDuUaQ==} peerDependencies: vue: ^3.2.0 @@ -102,8 +148,98 @@ packages: vue: 3.2.37 dev: false - /@esbuild/linux-loong64/0.14.54: - resolution: {integrity: sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==} + /@esbuild/android-arm64@0.17.19: + resolution: {integrity: sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-arm@0.17.19: + resolution: {integrity: sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-x64@0.17.19: + resolution: {integrity: sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-arm64@0.17.19: + resolution: {integrity: sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-x64@0.17.19: + resolution: {integrity: sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-arm64@0.17.19: + resolution: {integrity: sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-x64@0.17.19: + resolution: {integrity: sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm64@0.17.19: + resolution: {integrity: sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm@0.17.19: + resolution: {integrity: sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ia32@0.17.19: + resolution: {integrity: sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-loong64@0.17.19: + resolution: {integrity: sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==} engines: {node: '>=12'} cpu: [loong64] os: [linux] @@ -111,7 +247,106 @@ packages: dev: true optional: true - /@eslint/eslintrc/1.3.0: + 
/@esbuild/linux-mips64el@0.17.19: + resolution: {integrity: sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ppc64@0.17.19: + resolution: {integrity: sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-riscv64@0.17.19: + resolution: {integrity: sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-s390x@0.17.19: + resolution: {integrity: sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-x64@0.17.19: + resolution: {integrity: sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/netbsd-x64@0.17.19: + resolution: {integrity: sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/openbsd-x64@0.17.19: + resolution: {integrity: sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/sunos-x64@0.17.19: + resolution: {integrity: sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-arm64@0.17.19: + resolution: {integrity: sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-ia32@0.17.19: + resolution: {integrity: sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-x64@0.17.19: + resolution: {integrity: sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@eslint/eslintrc@1.3.0: resolution: {integrity: sha512-UWW0TMTmk2d7hLcWD1/e2g5HDM/HQ3csaLSqXCfqwh4uNDuNqlaKWXmEsL4Cs41Z0KnILNvwbHAah3C2yt06kw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: @@ -128,17 +363,17 @@ packages: - supports-color dev: true - /@floating-ui/core/0.7.3: + /@floating-ui/core@0.7.3: resolution: {integrity: sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg==} dev: false - /@floating-ui/dom/0.5.4: + /@floating-ui/dom@0.5.4: resolution: {integrity: sha512-419BMceRLq0RrmTSDxn8hf9R3VCJv2K9PUfugh5JyEFmdjzDo+e8U5EdR8nzKq8Yj1htzLm3b6eQEEam3/rrtg==} dependencies: 
'@floating-ui/core': 0.7.3 dev: false - /@humanwhocodes/config-array/0.10.4: + /@humanwhocodes/config-array@0.10.4: resolution: {integrity: sha512-mXAIHxZT3Vcpg83opl1wGlVZ9xydbfZO3r5YfRSH6Gpp2J/PfdBP0wbDa2sO6/qRbcalpoevVyW6A/fI6LfeMw==} engines: {node: '>=10.10.0'} dependencies: @@ -149,25 +384,25 @@ packages: - supports-color dev: true - /@humanwhocodes/gitignore-to-minimatch/1.0.2: + /@humanwhocodes/gitignore-to-minimatch@1.0.2: resolution: {integrity: sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==} dev: true - /@humanwhocodes/object-schema/1.2.1: + /@humanwhocodes/object-schema@1.2.1: resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==} dev: true - /@iconify-json/ep/1.1.7: + /@iconify-json/ep@1.1.7: resolution: {integrity: sha512-GhXWVKalXFlrGgfrCXAgqBre5hv3pPAknuxyywmjamcrL5gl5Mq9WOZtuhb4cB6cJ5pMiKOMtegt73FheqWscA==} dependencies: '@iconify/types': 1.1.0 dev: true - /@iconify/types/1.1.0: + /@iconify/types@1.1.0: resolution: {integrity: sha512-Jh0llaK2LRXQoYsorIH8maClebsnzTcve+7U3rQUSnC11X4jtPnFuyatqFLvMxZ8MLG8dB4zfHsbPfuvxluONw==} dev: true - /@intlify/core-base/9.2.2: + /@intlify/core-base@9.2.2: resolution: {integrity: sha512-JjUpQtNfn+joMbrXvpR4hTF8iJQ2sEFzzK3KIESOx+f+uwIjgw20igOyaIdhfsVVBCds8ZM64MoeNSx+PHQMkA==} engines: {node: '>= 14'} dependencies: @@ -177,14 +412,14 @@ packages: '@intlify/vue-devtools': 9.2.2 dev: false - /@intlify/devtools-if/9.2.2: + /@intlify/devtools-if@9.2.2: resolution: {integrity: sha512-4ttr/FNO29w+kBbU7HZ/U0Lzuh2cRDhP8UlWOtV9ERcjHzuyXVZmjyleESK6eVP60tGC9QtQW9yZE+JeRhDHkg==} engines: {node: '>= 14'} dependencies: '@intlify/shared': 9.2.2 dev: false - /@intlify/message-compiler/9.2.2: + /@intlify/message-compiler@9.2.2: resolution: {integrity: sha512-IUrQW7byAKN2fMBe8z6sK6riG1pue95e5jfokn8hA5Q3Bqy4MBJ5lJAofUsawQJYHeoPJ7svMDyBaVJ4d0GTtA==} engines: {node: '>= 14'} dependencies: @@ -192,12 +427,12 @@ packages: source-map: 0.6.1 dev: false - /@intlify/shared/9.2.2: + /@intlify/shared@9.2.2: resolution: {integrity: sha512-wRwTpsslgZS5HNyM7uDQYZtxnbI12aGiBZURX3BTR9RFIKKRWpllTsgzHWvj3HKm3Y2Sh5LPC1r0PDCKEhVn9Q==} engines: {node: '>= 14'} dev: false - /@intlify/vue-devtools/9.2.2: + /@intlify/vue-devtools@9.2.2: resolution: {integrity: sha512-+dUyqyCHWHb/UcvY1MlIpO87munedm3Gn6E9WWYdWrMuYLcoIoOEVDWSS8xSwtlPU+kA+MEQTP6Q1iI/ocusJg==} engines: {node: '>= 14'} dependencies: @@ -205,28 +440,42 @@ packages: '@intlify/shared': 9.2.2 dev: false - /@istanbuljs/schema/0.1.3: + /@istanbuljs/schema@0.1.3: resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} engines: {node: '>=8'} dev: true - /@jridgewell/resolve-uri/3.1.0: + /@jridgewell/gen-mapping@0.3.3: + resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.14 + '@jridgewell/trace-mapping': 0.3.15 + dev: true + + /@jridgewell/resolve-uri@3.1.0: resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==} engines: {node: '>=6.0.0'} dev: true - /@jridgewell/sourcemap-codec/1.4.14: + /@jridgewell/set-array@1.1.2: + resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} + engines: {node: '>=6.0.0'} + dev: true + + 
/@jridgewell/sourcemap-codec@1.4.14: resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==} dev: true - /@jridgewell/trace-mapping/0.3.15: + /@jridgewell/trace-mapping@0.3.15: resolution: {integrity: sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==} dependencies: '@jridgewell/resolve-uri': 3.1.0 '@jridgewell/sourcemap-codec': 1.4.14 dev: true - /@nodelib/fs.scandir/2.1.5: + /@nodelib/fs.scandir@2.1.5: resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} dependencies: @@ -234,12 +483,12 @@ packages: run-parallel: 1.2.0 dev: true - /@nodelib/fs.stat/2.0.5: + /@nodelib/fs.stat@2.0.5: resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} engines: {node: '>= 8'} dev: true - /@nodelib/fs.walk/1.2.8: + /@nodelib/fs.walk@1.2.8: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} dependencies: @@ -247,52 +496,56 @@ packages: fastq: 1.13.0 dev: true - /@sxzz/popperjs-es/2.11.7: + /@sxzz/popperjs-es@2.11.7: resolution: {integrity: sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==} dev: false - /@tootallnate/once/2.0.0: + /@tootallnate/once@2.0.0: resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} engines: {node: '>= 10'} dev: true - /@types/chai-subset/1.3.3: + /@types/chai-subset@1.3.3: resolution: {integrity: sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==} dependencies: - '@types/chai': 4.3.3 + '@types/chai': 4.3.5 dev: true - /@types/chai/4.3.3: - resolution: {integrity: sha512-hC7OMnszpxhZPduX+m+nrx+uFoLkWOMiR4oa/AZF3MuSETYTZmFfJAHqZEM8MVlvfG7BEUcgvtwoCTxBp6hm3g==} + /@types/chai@4.3.5: + resolution: {integrity: sha512-mEo1sAde+UCE6b2hxn332f1g1E8WfYRu6p5SvTKr2ZKC1f7gFJXk4h5PyGP9Dt6gCaG8y8XhwnXWC6Iy2cmBng==} dev: true - /@types/istanbul-lib-coverage/2.0.4: + /@types/istanbul-lib-coverage@2.0.4: resolution: {integrity: sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==} dev: true - /@types/json-schema/7.0.11: + /@types/json-schema@7.0.11: resolution: {integrity: sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==} dev: true - /@types/lodash-es/4.17.6: + /@types/lodash-es@4.17.6: resolution: {integrity: sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==} dependencies: '@types/lodash': 4.14.183 dev: false - /@types/lodash/4.14.183: + /@types/lodash@4.14.183: resolution: {integrity: sha512-UXavyuxzXKMqJPEpFPri6Ku5F9af6ZJXUneHhvQJxavrEjuHkFp2YnDWHcxJiG7hk8ZkWqjcyNeW1s/smZv5cw==} dev: false - /@types/node/18.7.6: + /@types/node@18.7.6: resolution: {integrity: sha512-EdxgKRXgYsNITy5mjjXjVE/CS8YENSdhiagGrLqjG0pvA2owgJ6i4l7wy/PFZGC0B1/H20lWKN7ONVDNYDZm7A==} dev: true - /@types/web-bluetooth/0.0.14: + /@types/swagger-ui-dist@3.30.1: + resolution: {integrity: sha512-wWojDensMF33dSrhak4iWSPOsWBbvf+rSJ6VWQ7ohQbGdKAiT2IwUexrDZkMvf3+vVAPVnNFDRDtxADFszbh+Q==} + dev: true + + /@types/web-bluetooth@0.0.14: resolution: {integrity: sha512-5d2RhCard1nQUC3aHcq/gHzWYO6K0WJmAbjO7mQJgCQKtZpgXxv1rOM6O/dBDhDYYVutk1sciOgNSe+5YyfM8A==} dev: false - 
/@typescript-eslint/eslint-plugin/5.33.1_vsoshirnpb7xw6mr7xomgfas2i: + /@typescript-eslint/eslint-plugin@5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4): resolution: {integrity: sha512-S1iZIxrTvKkU3+m63YUOxYPKaP+yWDQrdhxTglVDVEVBf+aCSw85+BmJnyUaQQsk5TXFG/LpBu9fa+LrAQ91fQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -303,23 +556,23 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq + '@typescript-eslint/parser': 5.33.1(eslint@8.22.0)(typescript@4.7.4) '@typescript-eslint/scope-manager': 5.33.1 - '@typescript-eslint/type-utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq - '@typescript-eslint/utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq + '@typescript-eslint/type-utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4) + '@typescript-eslint/utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4) debug: 4.3.4 eslint: 8.22.0 functional-red-black-tree: 1.0.1 ignore: 5.2.0 regexpp: 3.2.0 semver: 7.3.7 - tsutils: 3.21.0_typescript@4.7.4 + tsutils: 3.21.0(typescript@4.7.4) typescript: 4.7.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/parser/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq: + /@typescript-eslint/parser@5.33.1(eslint@8.22.0)(typescript@4.7.4): resolution: {integrity: sha512-IgLLtW7FOzoDlmaMoXdxG8HOCByTBXrB1V2ZQYSEV1ggMmJfAkMWTwUjjzagS6OkfpySyhKFkBw7A9jYmcHpZA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -331,7 +584,7 @@ packages: dependencies: '@typescript-eslint/scope-manager': 5.33.1 '@typescript-eslint/types': 5.33.1 - '@typescript-eslint/typescript-estree': 5.33.1_typescript@4.7.4 + '@typescript-eslint/typescript-estree': 5.33.1(typescript@4.7.4) debug: 4.3.4 eslint: 8.22.0 typescript: 4.7.4 @@ -339,7 +592,7 @@ packages: - supports-color dev: true - /@typescript-eslint/scope-manager/5.33.1: + /@typescript-eslint/scope-manager@5.33.1: resolution: {integrity: sha512-8ibcZSqy4c5m69QpzJn8XQq9NnqAToC8OdH/W6IXPXv83vRyEDPYLdjAlUx8h/rbusq6MkW4YdQzURGOqsn3CA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: @@ -347,7 +600,7 @@ packages: '@typescript-eslint/visitor-keys': 5.33.1 dev: true - /@typescript-eslint/type-utils/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq: + /@typescript-eslint/type-utils@5.33.1(eslint@8.22.0)(typescript@4.7.4): resolution: {integrity: sha512-X3pGsJsD8OiqhNa5fim41YtlnyiWMF/eKsEZGsHID2HcDqeSC5yr/uLOeph8rNF2/utwuI0IQoAK3fpoxcLl2g==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -357,21 +610,21 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq + '@typescript-eslint/utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4) debug: 4.3.4 eslint: 8.22.0 - tsutils: 3.21.0_typescript@4.7.4 + tsutils: 3.21.0(typescript@4.7.4) typescript: 4.7.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/types/5.33.1: + /@typescript-eslint/types@5.33.1: resolution: {integrity: sha512-7K6MoQPQh6WVEkMrMW5QOA5FO+BOwzHSNd0j3+BlBwd6vtzfZceJ8xJ7Um2XDi/O3umS8/qDX6jdy2i7CijkwQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /@typescript-eslint/typescript-estree/5.33.1_typescript@4.7.4: + /@typescript-eslint/typescript-estree@5.33.1(typescript@4.7.4): resolution: {integrity: sha512-JOAzJ4pJ+tHzA2pgsWQi4804XisPHOtbvwUyqsuuq8+y5B5GMZs7lI1xDWs6V2d7gE/Ez5bTGojSK12+IIPtXA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -386,13 +639,13 @@ packages: globby: 11.1.0 is-glob: 4.0.3 semver: 7.3.7 - tsutils: 
3.21.0_typescript@4.7.4 + tsutils: 3.21.0(typescript@4.7.4) typescript: 4.7.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/utils/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq: + /@typescript-eslint/utils@5.33.1(eslint@8.22.0)(typescript@4.7.4): resolution: {integrity: sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -401,16 +654,16 @@ packages: '@types/json-schema': 7.0.11 '@typescript-eslint/scope-manager': 5.33.1 '@typescript-eslint/types': 5.33.1 - '@typescript-eslint/typescript-estree': 5.33.1_typescript@4.7.4 + '@typescript-eslint/typescript-estree': 5.33.1(typescript@4.7.4) eslint: 8.22.0 eslint-scope: 5.1.1 - eslint-utils: 3.0.0_eslint@8.22.0 + eslint-utils: 3.0.0(eslint@8.22.0) transitivePeerDependencies: - supports-color - typescript dev: true - /@typescript-eslint/visitor-keys/5.33.1: + /@typescript-eslint/visitor-keys@5.33.1: resolution: {integrity: sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: @@ -418,46 +671,88 @@ packages: eslint-visitor-keys: 3.3.0 dev: true - /@vitejs/plugin-vue/3.0.3_vite@3.0.8+vue@3.2.37: - resolution: {integrity: sha512-U4zNBlz9mg+TA+i+5QPc3N5lQvdUXENZLO2h0Wdzp56gI1MWhqJOv+6R+d4kOzoaSSq6TnGPBdZAXKOe4lXy6g==} + /@vitejs/plugin-vue@4.2.3(vite@4.2.3)(vue@3.2.37): + resolution: {integrity: sha512-R6JDUfiZbJA9cMiguQ7jxALsgiprjBeHL5ikpXfJCH62pPHtI+JdJ5xWj6Ev73yXSlYl86+blXn1kZHQ7uElxw==} engines: {node: ^14.18.0 || >=16.0.0} peerDependencies: - vite: ^3.0.0 + vite: ^4.0.0 vue: ^3.2.25 dependencies: - vite: 3.0.8_sass@1.54.4 + vite: 4.2.3(@types/node@18.7.6)(sass@1.54.4) vue: 3.2.37 dev: true - /@vitest/coverage-c8/0.22.0_jsdom@20.0.0+sass@1.54.4: - resolution: {integrity: sha512-jwW6b8U+h9nbzQfKoRmpf2xjDg+mcAjLIdVUrZGhjTnIdekGfvoqFoeiXzsLv2HwYBeFi4943lYUftuj8qD1FQ==} + /@vitest/coverage-v8@0.32.0(vitest@0.32.0): + resolution: {integrity: sha512-VXXlWq9X/NbsoP/l/CHLBjutsFFww1UY1qEhzGjn/DY7Tqe+z0Nu8XKc8im/XUAmjiWsh2XV7sy/F0IKAl4eaw==} + peerDependencies: + vitest: '>=0.32.0 <1' dependencies: - c8: 7.12.0 - vitest: 0.22.0_jsdom@20.0.0+sass@1.54.4 + '@ampproject/remapping': 2.2.1 + '@bcoe/v8-coverage': 0.2.3 + istanbul-lib-coverage: 3.2.0 + istanbul-lib-report: 3.0.0 + istanbul-lib-source-maps: 4.0.1 + istanbul-reports: 3.1.5 + magic-string: 0.30.0 + picocolors: 1.0.0 + std-env: 3.3.3 + test-exclude: 6.0.0 + v8-to-istanbul: 9.1.0 + vitest: 0.32.0(jsdom@20.0.0)(sass@1.54.4) transitivePeerDependencies: - - '@edge-runtime/vm' - - '@vitest/browser' - - '@vitest/ui' - - happy-dom - - jsdom - - less - - sass - - stylus - supports-color - - terser dev: true - /@volar/code-gen/0.38.9: + /@vitest/expect@0.32.0: + resolution: {integrity: sha512-VxVHhIxKw9Lux+O9bwLEEk2gzOUe93xuFHy9SzYWnnoYZFYg1NfBtnfnYWiJN7yooJ7KNElCK5YtA7DTZvtXtg==} + dependencies: + '@vitest/spy': 0.32.0 + '@vitest/utils': 0.32.0 + chai: 4.3.7 + dev: true + + /@vitest/runner@0.32.0: + resolution: {integrity: sha512-QpCmRxftHkr72xt5A08xTEs9I4iWEXIOCHWhQQguWOKE4QH7DXSKZSOFibuwEIMAD7G0ERvtUyQn7iPWIqSwmw==} + dependencies: + '@vitest/utils': 0.32.0 + concordance: 5.0.4 + p-limit: 4.0.0 + pathe: 1.1.1 + dev: true + + /@vitest/snapshot@0.32.0: + resolution: {integrity: sha512-yCKorPWjEnzpUxQpGlxulujTcSPgkblwGzAUEL+z01FTUg/YuCDZ8dxr9sHA08oO2EwxzHXNLjQKWJ2zc2a19Q==} + dependencies: + magic-string: 0.30.0 + pathe: 1.1.1 + pretty-format: 27.5.1 + dev: true + + 
/@vitest/spy@0.32.0: + resolution: {integrity: sha512-MruAPlM0uyiq3d53BkwTeShXY0rYEfhNGQzVO5GHBmmX3clsxcWp79mMnkOVcV244sNTeDcHbcPFWIjOI4tZvw==} + dependencies: + tinyspy: 2.1.1 + dev: true + + /@vitest/utils@0.32.0: + resolution: {integrity: sha512-53yXunzx47MmbuvcOPpLaVljHaeSu1G2dHdmy7+9ngMnQIkBQcvwOcoclWFnxDMxFbnq8exAfh3aKSZaK71J5A==} + dependencies: + concordance: 5.0.4 + loupe: 2.3.6 + pretty-format: 27.5.1 + dev: true + + /@volar/code-gen@0.38.9: resolution: {integrity: sha512-n6LClucfA+37rQeskvh9vDoZV1VvCVNy++MAPKj2dT4FT+Fbmty/SDQqnsEBtdEe6E3OQctFvA/IcKsx3Mns0A==} dependencies: '@volar/source-map': 0.38.9 dev: true - /@volar/source-map/0.38.9: + /@volar/source-map@0.38.9: resolution: {integrity: sha512-ba0UFoHDYry+vwKdgkWJ6xlQT+8TFtZg1zj9tSjj4PykW1JZDuM0xplMotLun4h3YOoYfY9K1huY5gvxmrNLIw==} dev: true - /@volar/vue-code-gen/0.38.9: + /@volar/vue-code-gen@0.38.9: resolution: {integrity: sha512-tzj7AoarFBKl7e41MR006ncrEmNPHALuk8aG4WdDIaG387X5//5KhWC5Ff3ZfB2InGSeNT+CVUd74M0gS20rjA==} dependencies: '@volar/code-gen': 0.38.9 @@ -467,7 +762,7 @@ packages: '@vue/shared': 3.2.37 dev: true - /@volar/vue-typescript/0.38.9: + /@volar/vue-typescript@0.38.9: resolution: {integrity: sha512-iJMQGU91ADi98u8V1vXd2UBmELDAaeSP0ZJaFjwosClQdKlJQYc6MlxxKfXBZisHqfbhdtrGRyaryulnYtliZw==} dependencies: '@volar/code-gen': 0.38.9 @@ -477,7 +772,7 @@ packages: '@vue/reactivity': 3.2.37 dev: true - /@vue/compiler-core/3.2.37: + /@vue/compiler-core@3.2.37: resolution: {integrity: sha512-81KhEjo7YAOh0vQJoSmAD68wLfYqJvoiD4ulyedzF+OEk/bk6/hx3fTNVfuzugIIaTrOx4PGx6pAiBRe5e9Zmg==} dependencies: '@babel/parser': 7.18.11 @@ -485,13 +780,13 @@ packages: estree-walker: 2.0.2 source-map: 0.6.1 - /@vue/compiler-dom/3.2.37: + /@vue/compiler-dom@3.2.37: resolution: {integrity: sha512-yxJLH167fucHKxaqXpYk7x8z7mMEnXOw3G2q62FTkmsvNxu4FQSu5+3UMb+L7fjKa26DEzhrmCxAgFLLIzVfqQ==} dependencies: '@vue/compiler-core': 3.2.37 '@vue/shared': 3.2.37 - /@vue/compiler-sfc/3.2.37: + /@vue/compiler-sfc@3.2.37: resolution: {integrity: sha512-+7i/2+9LYlpqDv+KTtWhOZH+pa8/HnX/905MdVmAcI/mPQOBwkHHIzrsEsucyOIZQYMkXUiTkmZq5am/NyXKkg==} dependencies: '@babel/parser': 7.18.11 @@ -505,29 +800,29 @@ packages: postcss: 8.4.16 source-map: 0.6.1 - /@vue/compiler-ssr/3.2.37: + /@vue/compiler-ssr@3.2.37: resolution: {integrity: sha512-7mQJD7HdXxQjktmsWp/J67lThEIcxLemz1Vb5I6rYJHR5vI+lON3nPGOH3ubmbvYGt8xEUaAr1j7/tIFWiEOqw==} dependencies: '@vue/compiler-dom': 3.2.37 '@vue/shared': 3.2.37 - /@vue/devtools-api/6.2.1: + /@vue/devtools-api@6.2.1: resolution: {integrity: sha512-OEgAMeQXvCoJ+1x8WyQuVZzFo0wcyCmUR3baRVLmKBo1LmYZWMlRiXlux5jd0fqVJu6PfDbOrZItVqUEzLobeQ==} dev: false - /@vue/eslint-config-prettier/7.0.0_2xd4q2tc5cqa5as7uugqhp6oue: + /@vue/eslint-config-prettier@7.0.0(eslint@8.22.0)(prettier@2.7.1): resolution: {integrity: sha512-/CTc6ML3Wta1tCe1gUeO0EYnVXfo3nJXsIhZ8WJr3sov+cGASr6yuiibJTL6lmIBm7GobopToOuB3B6AWyV0Iw==} peerDependencies: eslint: '>= 7.28.0' prettier: '>= 2.0.0' dependencies: eslint: 8.22.0 - eslint-config-prettier: 8.5.0_eslint@8.22.0 - eslint-plugin-prettier: 4.2.1_i2cojdczqdiurzgttlwdgf764e + eslint-config-prettier: 8.5.0(eslint@8.22.0) + eslint-plugin-prettier: 4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1) prettier: 2.7.1 dev: true - /@vue/eslint-config-typescript/11.0.0_4py5zxx5ck6utobkmozwvrmyiy: + /@vue/eslint-config-typescript@11.0.0(eslint-plugin-vue@9.3.0)(eslint@8.22.0)(typescript@4.7.4): resolution: {integrity: sha512-txuRzxnQVmtUvvy9UyWUy9sHWXNeRPGmSPqP53hRtaiUeCTAondI9Ho9GQYI/8/eWljYOST7iA4Aa8sANBkWaA==} 
engines: {node: ^14.17.0 || >=16.0.0} peerDependencies: @@ -538,17 +833,17 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/eslint-plugin': 5.33.1_vsoshirnpb7xw6mr7xomgfas2i - '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq + '@typescript-eslint/eslint-plugin': 5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4) + '@typescript-eslint/parser': 5.33.1(eslint@8.22.0)(typescript@4.7.4) eslint: 8.22.0 - eslint-plugin-vue: 9.3.0_eslint@8.22.0 + eslint-plugin-vue: 9.3.0(eslint@8.22.0) typescript: 4.7.4 - vue-eslint-parser: 9.0.3_eslint@8.22.0 + vue-eslint-parser: 9.0.3(eslint@8.22.0) transitivePeerDependencies: - supports-color dev: true - /@vue/reactivity-transform/3.2.37: + /@vue/reactivity-transform@3.2.37: resolution: {integrity: sha512-IWopkKEb+8qpu/1eMKVeXrK0NLw9HicGviJzhJDEyfxTR9e1WtpnnbYkJWurX6WwoFP0sz10xQg8yL8lgskAZg==} dependencies: '@babel/parser': 7.18.11 @@ -557,25 +852,25 @@ packages: estree-walker: 2.0.2 magic-string: 0.25.9 - /@vue/reactivity/3.2.37: + /@vue/reactivity@3.2.37: resolution: {integrity: sha512-/7WRafBOshOc6m3F7plwzPeCu/RCVv9uMpOwa/5PiY1Zz+WLVRWiy0MYKwmg19KBdGtFWsmZ4cD+LOdVPcs52A==} dependencies: '@vue/shared': 3.2.37 - /@vue/runtime-core/3.2.37: + /@vue/runtime-core@3.2.37: resolution: {integrity: sha512-JPcd9kFyEdXLl/i0ClS7lwgcs0QpUAWj+SKX2ZC3ANKi1U4DOtiEr6cRqFXsPwY5u1L9fAjkinIdB8Rz3FoYNQ==} dependencies: '@vue/reactivity': 3.2.37 '@vue/shared': 3.2.37 - /@vue/runtime-dom/3.2.37: + /@vue/runtime-dom@3.2.37: resolution: {integrity: sha512-HimKdh9BepShW6YozwRKAYjYQWg9mQn63RGEiSswMbW+ssIht1MILYlVGkAGGQbkhSh31PCdoUcfiu4apXJoPw==} dependencies: '@vue/runtime-core': 3.2.37 '@vue/shared': 3.2.37 csstype: 2.6.20 - /@vue/server-renderer/3.2.37_vue@3.2.37: + /@vue/server-renderer@3.2.37(vue@3.2.37): resolution: {integrity: sha512-kLITEJvaYgZQ2h47hIzPh2K3jG8c1zCVbp/o/bzQOyvzaKiCquKS7AaioPI28GNxIsE/zSx+EwWYsNxDCX95MA==} peerDependencies: vue: 3.2.37 @@ -584,10 +879,10 @@ packages: '@vue/shared': 3.2.37 vue: 3.2.37 - /@vue/shared/3.2.37: + /@vue/shared@3.2.37: resolution: {integrity: sha512-4rSJemR2NQIo9Klm1vabqWjD8rs/ZaJSzMxkMNeJS6lHiUjjUeYFbooN19NgFjztubEKh3WlZUeOLVdbbUWHsw==} - /@vue/test-utils/2.0.2_vue@3.2.37: + /@vue/test-utils@2.0.2(vue@3.2.37): resolution: {integrity: sha512-E2P4oXSaWDqTZNbmKZFVLrNN/siVN78YkEqs7pHryWerrlZR9bBFLWdJwRoguX45Ru6HxIflzKl4vQvwRMwm5g==} peerDependencies: vue: ^3.0.1 @@ -595,7 +890,7 @@ packages: vue: 3.2.37 dev: true - /@vueuse/core/8.9.4_vue@3.2.37: + /@vueuse/core@8.9.4(vue@3.2.37): resolution: {integrity: sha512-B/Mdj9TK1peFyWaPof+Zf/mP9XuGAngaJZBwPaXBvU3aCTZlx3ltlrFFFyMV4iGBwsjSCeUCgZrtkEj9dS2Y3Q==} peerDependencies: '@vue/composition-api': ^1.1.0 @@ -608,16 +903,16 @@ packages: dependencies: '@types/web-bluetooth': 0.0.14 '@vueuse/metadata': 8.9.4 - '@vueuse/shared': 8.9.4_vue@3.2.37 + '@vueuse/shared': 8.9.4(vue@3.2.37) vue: 3.2.37 - vue-demi: 0.13.8_vue@3.2.37 + vue-demi: 0.13.8(vue@3.2.37) dev: false - /@vueuse/metadata/8.9.4: + /@vueuse/metadata@8.9.4: resolution: {integrity: sha512-IwSfzH80bnJMzqhaapqJl9JRIiyQU0zsRGEgnxN6jhq7992cPUJIRfV+JHRIZXjYqbwt07E1gTEp0R0zPJ1aqw==} dev: false - /@vueuse/shared/8.9.4_vue@3.2.37: + /@vueuse/shared@8.9.4(vue@3.2.37): resolution: {integrity: sha512-wt+T30c4K6dGRMVqPddexEVLa28YwxW5OFIPmzUHICjphfAuBFTTdDoyqREZNDOFJZ44ARH1WWQNCUK8koJ+Ag==} peerDependencies: '@vue/composition-api': ^1.1.0 @@ -629,21 +924,21 @@ packages: optional: true dependencies: vue: 3.2.37 - vue-demi: 0.13.8_vue@3.2.37 + vue-demi: 0.13.8(vue@3.2.37) dev: false - 
/abab/2.0.6: + /abab@2.0.6: resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} dev: true - /acorn-globals/6.0.0: + /acorn-globals@6.0.0: resolution: {integrity: sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==} dependencies: acorn: 7.4.1 acorn-walk: 7.2.0 dev: true - /acorn-jsx/5.3.2_acorn@8.8.0: + /acorn-jsx@5.3.2(acorn@8.8.0): resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 @@ -651,24 +946,35 @@ packages: acorn: 8.8.0 dev: true - /acorn-walk/7.2.0: + /acorn-walk@7.2.0: resolution: {integrity: sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==} engines: {node: '>=0.4.0'} dev: true - /acorn/7.4.1: + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + dev: true + + /acorn@7.4.1: resolution: {integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==} engines: {node: '>=0.4.0'} hasBin: true dev: true - /acorn/8.8.0: + /acorn@8.8.0: resolution: {integrity: sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==} engines: {node: '>=0.4.0'} hasBin: true dev: true - /agent-base/6.0.2: + /acorn@8.8.2: + resolution: {integrity: sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==} + engines: {node: '>=0.4.0'} + hasBin: true + dev: true + + /agent-base@6.0.2: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} dependencies: @@ -677,7 +983,7 @@ packages: - supports-color dev: true - /ajv/6.12.6: + /ajv@6.12.6: resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} dependencies: fast-deep-equal: 3.1.3 @@ -686,19 +992,24 @@ packages: uri-js: 4.4.1 dev: true - /ansi-regex/5.0.1: + /ansi-regex@5.0.1: resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} engines: {node: '>=8'} dev: true - /ansi-styles/4.3.0: + /ansi-styles@4.3.0: resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} dependencies: color-convert: 2.0.1 dev: true - /anymatch/3.1.2: + /ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + dev: true + + /anymatch@3.1.2: resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==} engines: {node: '>= 8'} dependencies: @@ -706,27 +1017,27 @@ packages: picomatch: 2.3.1 dev: true - /argparse/2.0.1: + /argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} dev: true - /array-union/2.1.0: + /array-union@2.1.0: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} dev: true - /assertion-error/1.1.0: + /assertion-error@1.1.0: resolution: {integrity: 
sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} dev: true - /async-validator/4.2.5: + /async-validator@4.2.5: resolution: {integrity: sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==} dev: false - /asynckit/0.4.0: + /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} - /axios/0.27.2: + /axios@0.27.2: resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} dependencies: follow-redirects: 1.15.1 @@ -735,75 +1046,65 @@ packages: - debug dev: false - /balanced-match/1.0.2: + /balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} dev: true - /binary-extensions/2.2.0: + /binary-extensions@2.2.0: resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} engines: {node: '>=8'} dev: true - /boolbase/1.0.0: + /blueimp-md5@2.19.0: + resolution: {integrity: sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} + dev: true + + /boolbase@1.0.0: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} dev: true - /brace-expansion/1.1.11: + /brace-expansion@1.1.11: resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} dependencies: balanced-match: 1.0.2 concat-map: 0.0.1 dev: true - /braces/3.0.2: + /braces@3.0.2: resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} engines: {node: '>=8'} dependencies: fill-range: 7.0.1 dev: true - /browser-process-hrtime/1.0.0: + /browser-process-hrtime@1.0.0: resolution: {integrity: sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==} dev: true - /c8/7.12.0: - resolution: {integrity: sha512-CtgQrHOkyxr5koX1wEUmN/5cfDa2ckbHRA4Gy5LAL0zaCFtVWJS5++n+w4/sr2GWGerBxgTjpKeDclk/Qk6W/A==} - engines: {node: '>=10.12.0'} - hasBin: true - dependencies: - '@bcoe/v8-coverage': 0.2.3 - '@istanbuljs/schema': 0.1.3 - find-up: 5.0.0 - foreground-child: 2.0.0 - istanbul-lib-coverage: 3.2.0 - istanbul-lib-report: 3.0.0 - istanbul-reports: 3.1.5 - rimraf: 3.0.2 - test-exclude: 6.0.0 - v8-to-istanbul: 9.0.1 - yargs: 16.2.0 - yargs-parser: 20.2.9 + /cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} dev: true - /callsites/3.1.0: + /callsites@3.1.0: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} dev: true - /chai/4.3.6: - resolution: {integrity: sha512-bbcp3YfHCUzMOvKqsztczerVgBKSsEijCySNlHHbX3VG1nskvqjz5Rfso1gGwD6w6oOV3eI60pKuMOV5MV7p3Q==} + /chai@4.3.7: + resolution: {integrity: sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==} engines: {node: '>=4'} dependencies: assertion-error: 1.1.0 check-error: 1.0.2 - deep-eql: 3.0.1 + deep-eql: 4.1.3 get-func-name: 2.0.0 loupe: 2.3.4 pathval: 1.1.1 type-detect: 4.0.8 dev: true - /chalk/4.1.2: + /chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} 
engines: {node: '>=10'} dependencies: @@ -811,11 +1112,11 @@ packages: supports-color: 7.2.0 dev: true - /check-error/1.0.2: + /check-error@1.0.2: resolution: {integrity: sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==} dev: true - /chokidar/3.5.3: + /chokidar@3.5.3: resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} engines: {node: '>= 8.10.0'} dependencies: @@ -830,42 +1131,48 @@ packages: fsevents: 2.3.2 dev: true - /cliui/7.0.4: - resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} - dependencies: - string-width: 4.2.3 - strip-ansi: 6.0.1 - wrap-ansi: 7.0.0 - dev: true - - /color-convert/2.0.1: + /color-convert@2.0.1: resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} engines: {node: '>=7.0.0'} dependencies: color-name: 1.1.4 dev: true - /color-name/1.1.4: + /color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} dev: true - /combined-stream/1.0.8: + /combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} dependencies: delayed-stream: 1.0.0 - /concat-map/0.0.1: + /concat-map@0.0.1: resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} dev: true - /convert-source-map/1.8.0: + /concordance@5.0.4: + resolution: {integrity: sha512-OAcsnTEYu1ARJqWVGwf4zh4JDfHZEaSNlNccFmt8YjB2l/n19/PF2viLINHc57vO4FKIAFl2FWASIGZZWZ2Kxw==} + engines: {node: '>=10.18.0 <11 || >=12.14.0 <13 || >=14'} + dependencies: + date-time: 3.1.0 + esutils: 2.0.3 + fast-diff: 1.2.0 + js-string-escape: 1.0.1 + lodash: 4.17.21 + md5-hex: 3.0.1 + semver: 7.3.7 + well-known-symbols: 2.0.0 + dev: true + + /convert-source-map@1.8.0: resolution: {integrity: sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==} dependencies: safe-buffer: 5.1.2 dev: true - /cross-spawn/7.0.3: + /cross-spawn@7.0.3: resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} engines: {node: '>= 8'} dependencies: @@ -874,353 +1181,177 @@ packages: which: 2.0.2 dev: true - /cssesc/3.0.0: + /cssesc@3.0.0: resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} engines: {node: '>=4'} hasBin: true dev: true - /cssom/0.3.8: + /cssom@0.3.8: resolution: {integrity: sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==} dev: true - /cssom/0.5.0: + /cssom@0.5.0: resolution: {integrity: sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==} dev: true - /cssstyle/2.3.0: + /cssstyle@2.3.0: resolution: {integrity: sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==} engines: {node: '>=8'} dependencies: cssom: 0.3.8 dev: true - /csstype/2.6.20: + /csstype@2.6.20: resolution: {integrity: sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==} - /data-urls/3.0.2: + /data-urls@3.0.2: resolution: {integrity: sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==} engines: {node: 
'>=12'} dependencies: abab: 2.0.6 whatwg-mimetype: 3.0.0 - whatwg-url: 11.0.0 - dev: true - - /dayjs/1.11.5: - resolution: {integrity: sha512-CAdX5Q3YW3Gclyo5Vpqkgpj8fSdLQcRuzfX6mC6Phy0nfJ0eGYOeS7m4mt2plDWLAtA4TqTakvbboHvUxfe4iA==} - dev: false - - /debug/4.3.4: - resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - dependencies: - ms: 2.1.2 - dev: true - - /decimal.js/10.4.0: - resolution: {integrity: sha512-Nv6ENEzyPQ6AItkGwLE2PGKinZZ9g59vSh2BeH6NqPu0OTKZ5ruJsVqh/orbAnqXc9pBbgXAIrc2EyaCj8NpGg==} - dev: true - - /deep-eql/3.0.1: - resolution: {integrity: sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==} - engines: {node: '>=0.12'} - dependencies: - type-detect: 4.0.8 - dev: true - - /deep-is/0.1.4: - resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} - dev: true - - /delayed-stream/1.0.0: - resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} - engines: {node: '>=0.4.0'} - - /dir-glob/3.0.1: - resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} - engines: {node: '>=8'} - dependencies: - path-type: 4.0.0 - dev: true - - /doctrine/3.0.0: - resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} - engines: {node: '>=6.0.0'} - dependencies: - esutils: 2.0.3 - dev: true - - /domexception/4.0.0: - resolution: {integrity: sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==} - engines: {node: '>=12'} - dependencies: - webidl-conversions: 7.0.0 - dev: true - - /element-plus/2.2.13_vue@3.2.37: - resolution: {integrity: sha512-dKQ7BPZC8deUPhv+6s4GgOL0GyGj3KpUarywxm6s1nWnHjH6FqeZlUcxPqBvJd7W/d81POayx3B13GP+rfkG9g==} - peerDependencies: - vue: ^3.2.0 - dependencies: - '@ctrl/tinycolor': 3.4.1 - '@element-plus/icons-vue': 2.0.9_vue@3.2.37 - '@floating-ui/dom': 0.5.4 - '@popperjs/core': /@sxzz/popperjs-es/2.11.7 - '@types/lodash': 4.14.183 - '@types/lodash-es': 4.17.6 - '@vueuse/core': 8.9.4_vue@3.2.37 - async-validator: 4.2.5 - dayjs: 1.11.5 - escape-html: 1.0.3 - lodash: 4.17.21 - lodash-es: 4.17.21 - lodash-unified: 1.0.2_3ib2ivapxullxkx3xftsimdk7u - memoize-one: 6.0.0 - normalize-wheel-es: 1.2.0 - vue: 3.2.37 - transitivePeerDependencies: - - '@vue/composition-api' - dev: false - - /emoji-regex/8.0.0: - resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} - dev: true - - /entities/4.3.1: - resolution: {integrity: sha512-o4q/dYJlmyjP2zfnaWDUC6A3BQFmVTX+tZPezK7k0GLSU9QYCauscf5Y+qcEPzKL+EixVouYDgLQK5H9GrLpkg==} - engines: {node: '>=0.12'} - dev: true - - /esbuild-android-64/0.14.54: - resolution: {integrity: sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [android] - requiresBuild: true - dev: true - optional: true - - /esbuild-android-arm64/0.14.54: - resolution: {integrity: sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==} - engines: {node: '>=12'} - cpu: [arm64] - os: [android] - requiresBuild: true - dev: true - optional: true - - 
/esbuild-darwin-64/0.14.54: - resolution: {integrity: sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==} - engines: {node: '>=12'} - cpu: [x64] - os: [darwin] - requiresBuild: true - dev: true - optional: true - - /esbuild-darwin-arm64/0.14.54: - resolution: {integrity: sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==} - engines: {node: '>=12'} - cpu: [arm64] - os: [darwin] - requiresBuild: true - dev: true - optional: true - - /esbuild-freebsd-64/0.14.54: - resolution: {integrity: sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==} - engines: {node: '>=12'} - cpu: [x64] - os: [freebsd] - requiresBuild: true - dev: true - optional: true - - /esbuild-freebsd-arm64/0.14.54: - resolution: {integrity: sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==} - engines: {node: '>=12'} - cpu: [arm64] - os: [freebsd] - requiresBuild: true - dev: true - optional: true - - /esbuild-linux-32/0.14.54: - resolution: {integrity: sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==} - engines: {node: '>=12'} - cpu: [ia32] - os: [linux] - requiresBuild: true - dev: true - optional: true - - /esbuild-linux-64/0.14.54: - resolution: {integrity: sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==} - engines: {node: '>=12'} - cpu: [x64] - os: [linux] - requiresBuild: true - dev: true - optional: true - - /esbuild-linux-arm/0.14.54: - resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==} - engines: {node: '>=12'} - cpu: [arm] - os: [linux] - requiresBuild: true - dev: true - optional: true - - /esbuild-linux-arm64/0.14.54: - resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==} - engines: {node: '>=12'} - cpu: [arm64] - os: [linux] - requiresBuild: true + whatwg-url: 11.0.0 dev: true - optional: true - /esbuild-linux-mips64le/0.14.54: - resolution: {integrity: sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==} - engines: {node: '>=12'} - cpu: [mips64el] - os: [linux] - requiresBuild: true - dev: true - optional: true + /date-fns@2.29.3: + resolution: {integrity: sha512-dDCnyH2WnnKusqvZZ6+jA1O51Ibt8ZMRNkDZdyAyK4YfbDwa/cEmuztzG5pk6hqlp9aSBPYcjOlktquahGwGeA==} + engines: {node: '>=0.11'} + dev: false - /esbuild-linux-ppc64le/0.14.54: - resolution: {integrity: sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==} - engines: {node: '>=12'} - cpu: [ppc64] - os: [linux] - requiresBuild: true + /date-time@3.1.0: + resolution: {integrity: sha512-uqCUKXE5q1PNBXjPqvwhwJf9SwMoAHBgWJ6DcrnS5o+W2JOiIILl0JEdVD8SGujrNS02GGxgwAg2PN2zONgtjg==} + engines: {node: '>=6'} + dependencies: + time-zone: 1.0.0 dev: true - optional: true - /esbuild-linux-riscv64/0.14.54: - resolution: {integrity: sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==} - engines: {node: '>=12'} - cpu: [riscv64] - os: [linux] - requiresBuild: true + /dayjs@1.11.5: + resolution: {integrity: sha512-CAdX5Q3YW3Gclyo5Vpqkgpj8fSdLQcRuzfX6mC6Phy0nfJ0eGYOeS7m4mt2plDWLAtA4TqTakvbboHvUxfe4iA==} + dev: false + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + 
engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 dev: true - optional: true - /esbuild-linux-s390x/0.14.54: - resolution: {integrity: sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==} - engines: {node: '>=12'} - cpu: [s390x] - os: [linux] - requiresBuild: true + /decimal.js@10.4.0: + resolution: {integrity: sha512-Nv6ENEzyPQ6AItkGwLE2PGKinZZ9g59vSh2BeH6NqPu0OTKZ5ruJsVqh/orbAnqXc9pBbgXAIrc2EyaCj8NpGg==} dev: true - optional: true - /esbuild-netbsd-64/0.14.54: - resolution: {integrity: sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==} - engines: {node: '>=12'} - cpu: [x64] - os: [netbsd] - requiresBuild: true + /deep-eql@4.1.3: + resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} + engines: {node: '>=6'} + dependencies: + type-detect: 4.0.8 dev: true - optional: true - /esbuild-openbsd-64/0.14.54: - resolution: {integrity: sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==} - engines: {node: '>=12'} - cpu: [x64] - os: [openbsd] - requiresBuild: true + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} dev: true - optional: true - /esbuild-sunos-64/0.14.54: - resolution: {integrity: sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==} - engines: {node: '>=12'} - cpu: [x64] - os: [sunos] - requiresBuild: true + /delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + /dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + dependencies: + path-type: 4.0.0 dev: true - optional: true - /esbuild-windows-32/0.14.54: - resolution: {integrity: sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==} - engines: {node: '>=12'} - cpu: [ia32] - os: [win32] - requiresBuild: true + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 dev: true - optional: true - /esbuild-windows-64/0.14.54: - resolution: {integrity: sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==} + /domexception@4.0.0: + resolution: {integrity: sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==} engines: {node: '>=12'} - cpu: [x64] - os: [win32] - requiresBuild: true + dependencies: + webidl-conversions: 7.0.0 dev: true - optional: true - /esbuild-windows-arm64/0.14.54: - resolution: {integrity: sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==} - engines: {node: '>=12'} - cpu: [arm64] - os: [win32] - requiresBuild: true + /element-plus@2.2.13(vue@3.2.37): + resolution: {integrity: sha512-dKQ7BPZC8deUPhv+6s4GgOL0GyGj3KpUarywxm6s1nWnHjH6FqeZlUcxPqBvJd7W/d81POayx3B13GP+rfkG9g==} + peerDependencies: + vue: ^3.2.0 + dependencies: + '@ctrl/tinycolor': 3.4.1 + '@element-plus/icons-vue': 2.0.9(vue@3.2.37) + '@floating-ui/dom': 0.5.4 + '@popperjs/core': 
/@sxzz/popperjs-es@2.11.7 + '@types/lodash': 4.14.183 + '@types/lodash-es': 4.17.6 + '@vueuse/core': 8.9.4(vue@3.2.37) + async-validator: 4.2.5 + dayjs: 1.11.5 + escape-html: 1.0.3 + lodash: 4.17.21 + lodash-es: 4.17.21 + lodash-unified: 1.0.2(@types/lodash-es@4.17.6)(lodash-es@4.17.21)(lodash@4.17.21) + memoize-one: 6.0.0 + normalize-wheel-es: 1.2.0 + vue: 3.2.37 + transitivePeerDependencies: + - '@vue/composition-api' + dev: false + + /entities@4.3.1: + resolution: {integrity: sha512-o4q/dYJlmyjP2zfnaWDUC6A3BQFmVTX+tZPezK7k0GLSU9QYCauscf5Y+qcEPzKL+EixVouYDgLQK5H9GrLpkg==} + engines: {node: '>=0.12'} dev: true - optional: true - /esbuild/0.14.54: - resolution: {integrity: sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==} + /esbuild@0.17.19: + resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==} engines: {node: '>=12'} hasBin: true requiresBuild: true optionalDependencies: - '@esbuild/linux-loong64': 0.14.54 - esbuild-android-64: 0.14.54 - esbuild-android-arm64: 0.14.54 - esbuild-darwin-64: 0.14.54 - esbuild-darwin-arm64: 0.14.54 - esbuild-freebsd-64: 0.14.54 - esbuild-freebsd-arm64: 0.14.54 - esbuild-linux-32: 0.14.54 - esbuild-linux-64: 0.14.54 - esbuild-linux-arm: 0.14.54 - esbuild-linux-arm64: 0.14.54 - esbuild-linux-mips64le: 0.14.54 - esbuild-linux-ppc64le: 0.14.54 - esbuild-linux-riscv64: 0.14.54 - esbuild-linux-s390x: 0.14.54 - esbuild-netbsd-64: 0.14.54 - esbuild-openbsd-64: 0.14.54 - esbuild-sunos-64: 0.14.54 - esbuild-windows-32: 0.14.54 - esbuild-windows-64: 0.14.54 - esbuild-windows-arm64: 0.14.54 - dev: true - - /escalade/3.1.1: - resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} - engines: {node: '>=6'} - dev: true - - /escape-html/1.0.3: + '@esbuild/android-arm': 0.17.19 + '@esbuild/android-arm64': 0.17.19 + '@esbuild/android-x64': 0.17.19 + '@esbuild/darwin-arm64': 0.17.19 + '@esbuild/darwin-x64': 0.17.19 + '@esbuild/freebsd-arm64': 0.17.19 + '@esbuild/freebsd-x64': 0.17.19 + '@esbuild/linux-arm': 0.17.19 + '@esbuild/linux-arm64': 0.17.19 + '@esbuild/linux-ia32': 0.17.19 + '@esbuild/linux-loong64': 0.17.19 + '@esbuild/linux-mips64el': 0.17.19 + '@esbuild/linux-ppc64': 0.17.19 + '@esbuild/linux-riscv64': 0.17.19 + '@esbuild/linux-s390x': 0.17.19 + '@esbuild/linux-x64': 0.17.19 + '@esbuild/netbsd-x64': 0.17.19 + '@esbuild/openbsd-x64': 0.17.19 + '@esbuild/sunos-x64': 0.17.19 + '@esbuild/win32-arm64': 0.17.19 + '@esbuild/win32-ia32': 0.17.19 + '@esbuild/win32-x64': 0.17.19 + dev: true + + /escape-html@1.0.3: resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} dev: false - /escape-string-regexp/4.0.0: + /escape-string-regexp@4.0.0: resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} engines: {node: '>=10'} dev: true - /escodegen/2.0.0: + /escodegen@2.0.0: resolution: {integrity: sha512-mmHKys/C8BFUGI+MAWNcSYoORYLMdPzjrknd2Vc+bUsjN5bXcr8EhrNB+UTqfL1y3I9c4fw2ihgtMPQLBRiQxw==} engines: {node: '>=6.0'} hasBin: true @@ -1233,7 +1364,7 @@ packages: source-map: 0.6.1 dev: true - /eslint-config-prettier/8.5.0_eslint@8.22.0: + /eslint-config-prettier@8.5.0(eslint@8.22.0): resolution: {integrity: sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==} hasBin: true peerDependencies: @@ -1242,23 +1373,7 @@ packages: eslint: 8.22.0 
dev: true - /eslint-plugin-prettier/4.2.1_2xd4q2tc5cqa5as7uugqhp6oue: - resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==} - engines: {node: '>=12.0.0'} - peerDependencies: - eslint: '>=7.28.0' - eslint-config-prettier: '*' - prettier: '>=2.0.0' - peerDependenciesMeta: - eslint-config-prettier: - optional: true - dependencies: - eslint: 8.22.0 - prettier: 2.7.1 - prettier-linter-helpers: 1.0.0 - dev: true - - /eslint-plugin-prettier/4.2.1_i2cojdczqdiurzgttlwdgf764e: + /eslint-plugin-prettier@4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1): resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==} engines: {node: '>=12.0.0'} peerDependencies: @@ -1270,30 +1385,30 @@ packages: optional: true dependencies: eslint: 8.22.0 - eslint-config-prettier: 8.5.0_eslint@8.22.0 + eslint-config-prettier: 8.5.0(eslint@8.22.0) prettier: 2.7.1 prettier-linter-helpers: 1.0.0 dev: true - /eslint-plugin-vue/9.3.0_eslint@8.22.0: + /eslint-plugin-vue@9.3.0(eslint@8.22.0): resolution: {integrity: sha512-iscKKkBZgm6fGZwFt6poRoWC0Wy2dQOlwUPW++CiPoQiw1enctV2Hj5DBzzjJZfyqs+FAXhgzL4q0Ww03AgSmQ==} engines: {node: ^14.17.0 || >=16.0.0} peerDependencies: eslint: ^6.2.0 || ^7.0.0 || ^8.0.0 dependencies: eslint: 8.22.0 - eslint-utils: 3.0.0_eslint@8.22.0 + eslint-utils: 3.0.0(eslint@8.22.0) natural-compare: 1.4.0 nth-check: 2.1.1 postcss-selector-parser: 6.0.10 semver: 7.3.7 - vue-eslint-parser: 9.0.3_eslint@8.22.0 + vue-eslint-parser: 9.0.3(eslint@8.22.0) xml-name-validator: 4.0.0 transitivePeerDependencies: - supports-color dev: true - /eslint-scope/5.1.1: + /eslint-scope@5.1.1: resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} engines: {node: '>=8.0.0'} dependencies: @@ -1301,7 +1416,7 @@ packages: estraverse: 4.3.0 dev: true - /eslint-scope/7.1.1: + /eslint-scope@7.1.1: resolution: {integrity: sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: @@ -1309,7 +1424,7 @@ packages: estraverse: 5.3.0 dev: true - /eslint-utils/3.0.0_eslint@8.22.0: + /eslint-utils@3.0.0(eslint@8.22.0): resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==} engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0} peerDependencies: @@ -1319,17 +1434,17 @@ packages: eslint-visitor-keys: 2.1.0 dev: true - /eslint-visitor-keys/2.1.0: + /eslint-visitor-keys@2.1.0: resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==} engines: {node: '>=10'} dev: true - /eslint-visitor-keys/3.3.0: + /eslint-visitor-keys@3.3.0: resolution: {integrity: sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /eslint/8.22.0: + /eslint@8.22.0: resolution: {integrity: sha512-ci4t0sz6vSRKdmkOGmprBo6fmI4PrphDFMy5JEq/fNS0gQkJM3rLmrqcp8ipMcdobH3KtUP40KniAE9W19S4wA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} hasBin: true @@ -1344,7 +1459,7 @@ packages: doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.1.1 - eslint-utils: 3.0.0_eslint@8.22.0 + eslint-utils: 3.0.0(eslint@8.22.0) eslint-visitor-keys: 3.3.0 espree: 9.3.3 esquery: 1.4.0 @@ -1377,62 +1492,62 @@ packages: - supports-color dev: true - /espree/9.3.3: + 
/espree@9.3.3: resolution: {integrity: sha512-ORs1Rt/uQTqUKjDdGCyrtYxbazf5umATSf/K4qxjmZHORR6HJk+2s/2Pqe+Kk49HHINC/xNIrGfgh8sZcll0ng==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: acorn: 8.8.0 - acorn-jsx: 5.3.2_acorn@8.8.0 + acorn-jsx: 5.3.2(acorn@8.8.0) eslint-visitor-keys: 3.3.0 dev: true - /esprima/4.0.1: + /esprima@4.0.1: resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} engines: {node: '>=4'} hasBin: true dev: true - /esquery/1.4.0: + /esquery@1.4.0: resolution: {integrity: sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==} engines: {node: '>=0.10'} dependencies: estraverse: 5.3.0 dev: true - /esrecurse/4.3.0: + /esrecurse@4.3.0: resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} engines: {node: '>=4.0'} dependencies: estraverse: 5.3.0 dev: true - /estraverse/4.3.0: + /estraverse@4.3.0: resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} engines: {node: '>=4.0'} dev: true - /estraverse/5.3.0: + /estraverse@5.3.0: resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} engines: {node: '>=4.0'} dev: true - /estree-walker/2.0.2: + /estree-walker@2.0.2: resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} - /esutils/2.0.3: + /esutils@2.0.3: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} dev: true - /fast-deep-equal/3.1.3: + /fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} dev: true - /fast-diff/1.2.0: + /fast-diff@1.2.0: resolution: {integrity: sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==} dev: true - /fast-glob/3.2.11: + /fast-glob@3.2.11: resolution: {integrity: sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==} engines: {node: '>=8.6.0'} dependencies: @@ -1443,35 +1558,35 @@ packages: micromatch: 4.0.5 dev: true - /fast-json-stable-stringify/2.1.0: + /fast-json-stable-stringify@2.1.0: resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} dev: true - /fast-levenshtein/2.0.6: + /fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} dev: true - /fastq/1.13.0: + /fastq@1.13.0: resolution: {integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==} dependencies: reusify: 1.0.4 dev: true - /file-entry-cache/6.0.1: + /file-entry-cache@6.0.1: resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} engines: {node: ^10.12.0 || >=12.0.0} dependencies: flat-cache: 3.0.4 dev: true - /fill-range/7.0.1: + /fill-range@7.0.1: resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} engines: {node: '>=8'} dependencies: to-regex-range: 5.0.1 dev: true - /find-up/5.0.0: + /find-up@5.0.0: resolution: {integrity: 
sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} dependencies: @@ -1479,7 +1594,7 @@ packages: path-exists: 4.0.0 dev: true - /flat-cache/3.0.4: + /flat-cache@3.0.4: resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==} engines: {node: ^10.12.0 || >=12.0.0} dependencies: @@ -1487,11 +1602,11 @@ packages: rimraf: 3.0.2 dev: true - /flatted/3.2.6: + /flatted@3.2.6: resolution: {integrity: sha512-0sQoMh9s0BYsm+12Huy/rkKxVu4R1+r96YX5cG44rHV0pQ6iC3Q+mkoMFaGWObMFYQxCVT+ssG1ksneA2MI9KQ==} dev: true - /follow-redirects/1.15.1: + /follow-redirects@1.15.1: resolution: {integrity: sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==} engines: {node: '>=4.0'} peerDependencies: @@ -1501,15 +1616,7 @@ packages: optional: true dev: false - /foreground-child/2.0.0: - resolution: {integrity: sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==} - engines: {node: '>=8.0.0'} - dependencies: - cross-spawn: 7.0.3 - signal-exit: 3.0.7 - dev: true - - /form-data/4.0.0: + /form-data@4.0.0: resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} engines: {node: '>= 6'} dependencies: @@ -1517,11 +1624,11 @@ packages: combined-stream: 1.0.8 mime-types: 2.1.35 - /fs.realpath/1.0.0: + /fs.realpath@1.0.0: resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} dev: true - /fsevents/2.3.2: + /fsevents@2.3.2: resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] @@ -1529,38 +1636,33 @@ packages: dev: true optional: true - /function-bind/1.1.1: + /function-bind@1.1.1: resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} dev: true - /functional-red-black-tree/1.0.1: + /functional-red-black-tree@1.0.1: resolution: {integrity: sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==} dev: true - /get-caller-file/2.0.5: - resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} - engines: {node: 6.* || 8.* || >= 10.*} - dev: true - - /get-func-name/2.0.0: + /get-func-name@2.0.0: resolution: {integrity: sha512-Hm0ixYtaSZ/V7C8FJrtZIuBBI+iSgL+1Aq82zSu8VQNB4S3Gk8e7Qs3VwBDJAhmRZcFqkl3tQu36g/Foh5I5ig==} dev: true - /glob-parent/5.1.2: + /glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} dependencies: is-glob: 4.0.3 dev: true - /glob-parent/6.0.2: + /glob-parent@6.0.2: resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} engines: {node: '>=10.13.0'} dependencies: is-glob: 4.0.3 dev: true - /glob/7.2.3: + /glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} dependencies: fs.realpath: 1.0.0 @@ -1571,14 +1673,14 @@ packages: path-is-absolute: 1.0.1 dev: true - /globals/13.17.0: + /globals@13.17.0: resolution: {integrity: sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==} engines: {node: 
'>=8'} dependencies: type-fest: 0.20.2 dev: true - /globby/11.1.0: + /globby@11.1.0: resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} engines: {node: '>=10'} dependencies: @@ -1590,34 +1692,34 @@ packages: slash: 3.0.0 dev: true - /grapheme-splitter/1.0.4: + /grapheme-splitter@1.0.4: resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} dev: true - /has-flag/4.0.0: + /has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} dev: true - /has/1.0.3: + /has@1.0.3: resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} engines: {node: '>= 0.4.0'} dependencies: function-bind: 1.1.1 dev: true - /html-encoding-sniffer/3.0.0: + /html-encoding-sniffer@3.0.0: resolution: {integrity: sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==} engines: {node: '>=12'} dependencies: whatwg-encoding: 2.0.0 dev: true - /html-escaper/2.0.2: + /html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} dev: true - /http-proxy-agent/5.0.0: + /http-proxy-agent@5.0.0: resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} engines: {node: '>= 6'} dependencies: @@ -1628,7 +1730,7 @@ packages: - supports-color dev: true - /https-proxy-agent/5.0.1: + /https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} dependencies: @@ -1638,23 +1740,23 @@ packages: - supports-color dev: true - /iconv-lite/0.6.3: + /iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} dependencies: safer-buffer: 2.1.2 dev: true - /ignore/5.2.0: + /ignore@5.2.0: resolution: {integrity: sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==} engines: {node: '>= 4'} dev: true - /immutable/4.1.0: + /immutable@4.1.0: resolution: {integrity: sha512-oNkuqVTA8jqG1Q6c+UglTOD1xhC1BtjKI7XkCXRkZHrN5m18/XsnUp8Q89GkQO/z+0WjonSvl0FLhDYftp46nQ==} dev: true - /import-fresh/3.3.0: + /import-fresh@3.3.0: resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} engines: {node: '>=6'} dependencies: @@ -1662,71 +1764,66 @@ packages: resolve-from: 4.0.0 dev: true - /imurmurhash/0.1.4: + /imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} dev: true - /inflight/1.0.6: + /inflight@1.0.6: resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} dependencies: once: 1.4.0 wrappy: 1.0.2 dev: true - /inherits/2.0.4: + /inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} dev: true - /is-binary-path/2.1.0: + /is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} dependencies: binary-extensions: 
2.2.0 dev: true - /is-core-module/2.10.0: + /is-core-module@2.10.0: resolution: {integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==} dependencies: has: 1.0.3 dev: true - /is-extglob/2.1.1: + /is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} dev: true - /is-fullwidth-code-point/3.0.0: - resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} - engines: {node: '>=8'} - dev: true - - /is-glob/4.0.3: + /is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} dependencies: is-extglob: 2.1.1 dev: true - /is-number/7.0.0: + /is-number@7.0.0: resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} dev: true - /is-potential-custom-element-name/1.0.1: + /is-potential-custom-element-name@1.0.1: resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} dev: true - /isexe/2.0.0: + /isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} dev: true - /istanbul-lib-coverage/3.2.0: + /istanbul-lib-coverage@3.2.0: resolution: {integrity: sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==} engines: {node: '>=8'} dev: true - /istanbul-lib-report/3.0.0: + /istanbul-lib-report@3.0.0: resolution: {integrity: sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==} engines: {node: '>=8'} dependencies: @@ -1735,7 +1832,18 @@ packages: supports-color: 7.2.0 dev: true - /istanbul-reports/3.1.5: + /istanbul-lib-source-maps@4.0.1: + resolution: {integrity: sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==} + engines: {node: '>=10'} + dependencies: + debug: 4.3.4 + istanbul-lib-coverage: 3.2.0 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + dev: true + + /istanbul-reports@3.1.5: resolution: {integrity: sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==} engines: {node: '>=8'} dependencies: @@ -1743,14 +1851,19 @@ packages: istanbul-lib-report: 3.0.0 dev: true - /js-yaml/4.1.0: + /js-string-escape@1.0.1: + resolution: {integrity: sha512-Smw4xcfIQ5LVjAOuJCvN/zIodzA/BBSsluuoSykP+lUvScIi4U6RJLfwHet5cxFnCswUjISV8oAXaqaJDY3chg==} + engines: {node: '>= 0.8'} + dev: true + + /js-yaml@4.1.0: resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} hasBin: true dependencies: argparse: 2.0.1 dev: true - /jsdom/20.0.0: + /jsdom@20.0.0: resolution: {integrity: sha512-x4a6CKCgx00uCmP+QakBDFXwjAJ69IkkIWHmtmjd3wvXPcdOS44hfX2vqkOQrVrq8l9DhNNADZRXaCEWvgXtVA==} engines: {node: '>=14'} peerDependencies: @@ -1777,7 +1890,7 @@ packages: parse5: 7.0.0 saxes: 6.0.0 symbol-tree: 3.2.4 - tough-cookie: 4.0.0 + tough-cookie: 4.1.3 w3c-hr-time: 1.0.2 w3c-xmlserializer: 3.0.0 webidl-conversions: 7.0.0 @@ -1792,15 +1905,19 @@ packages: - utf-8-validate dev: true - /json-schema-traverse/0.4.1: + /json-schema-traverse@0.4.1: resolution: {integrity: 
sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} dev: true - /json-stable-stringify-without-jsonify/1.0.1: + /json-stable-stringify-without-jsonify@1.0.1: resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} dev: true - /levn/0.3.0: + /jsonc-parser@3.2.0: + resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==} + dev: true + + /levn@0.3.0: resolution: {integrity: sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==} engines: {node: '>= 0.8.0'} dependencies: @@ -1808,7 +1925,7 @@ packages: type-check: 0.3.2 dev: true - /levn/0.4.1: + /levn@0.4.1: resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} dependencies: @@ -1816,23 +1933,23 @@ packages: type-check: 0.4.0 dev: true - /local-pkg/0.4.2: - resolution: {integrity: sha512-mlERgSPrbxU3BP4qBqAvvwlgW4MTg78iwJdGGnv7kibKjWcJksrG3t6LB5lXI93wXRDvG4NpUgJFmTG4T6rdrg==} + /local-pkg@0.4.3: + resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==} engines: {node: '>=14'} dev: true - /locate-path/6.0.0: + /locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} dependencies: p-locate: 5.0.0 dev: true - /lodash-es/4.17.21: + /lodash-es@4.17.21: resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} dev: false - /lodash-unified/1.0.2_3ib2ivapxullxkx3xftsimdk7u: + /lodash-unified@1.0.2(@types/lodash-es@4.17.6)(lodash-es@4.17.21)(lodash@4.17.21): resolution: {integrity: sha512-OGbEy+1P+UT26CYi4opY4gebD8cWRDxAT6MAObIVQMiqYdxZr1g3QHWCToVsm31x2NkLS4K3+MC2qInaRMa39g==} peerDependencies: '@types/lodash-es': '*' @@ -1844,48 +1961,68 @@ packages: lodash-es: 4.17.21 dev: false - /lodash.merge/4.6.2: + /lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} dev: true - /lodash/4.17.21: + /lodash@4.17.21: resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} - /loupe/2.3.4: + /loupe@2.3.4: resolution: {integrity: sha512-OvKfgCC2Ndby6aSTREl5aCCPTNIzlDfQZvZxNUrBrihDhL3xcrYegTblhmEiCrg2kKQz4XsFIaemE5BF4ybSaQ==} dependencies: get-func-name: 2.0.0 dev: true - /lru-cache/6.0.0: + /loupe@2.3.6: + resolution: {integrity: sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==} + dependencies: + get-func-name: 2.0.0 + dev: true + + /lru-cache@6.0.0: resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} engines: {node: '>=10'} dependencies: yallist: 4.0.0 dev: true - /magic-string/0.25.9: + /magic-string@0.25.9: resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} dependencies: sourcemap-codec: 1.4.8 - /make-dir/3.1.0: + /magic-string@0.30.0: + resolution: {integrity: sha512-LA+31JYDJLs82r2ScLrlz1GjSgu66ZV518eyWT+S8VhyQn/JL0u9MeBOvQMGYiPk1DBiSN9DDMOcXvigJZaViQ==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.14 + dev: true + + /make-dir@3.1.0: resolution: {integrity: 
sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==} engines: {node: '>=8'} dependencies: semver: 6.3.0 dev: true - /memoize-one/6.0.0: + /md5-hex@3.0.1: + resolution: {integrity: sha512-BUiRtTtV39LIJwinWBjqVsU9xhdnz7/i889V859IBFpuqGAj6LuOvHv5XLbgZ2R7ptJoJaEcxkv88/h25T7Ciw==} + engines: {node: '>=8'} + dependencies: + blueimp-md5: 2.19.0 + dev: true + + /memoize-one@6.0.0: resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} dev: false - /merge2/1.4.1: + /merge2@1.4.1: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} dev: true - /micromatch/4.0.5: + /micromatch@4.0.5: resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} engines: {node: '>=8.6'} dependencies: @@ -1893,61 +2030,76 @@ packages: picomatch: 2.3.1 dev: true - /mime-db/1.52.0: + /mime-db@1.52.0: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} - /mime-types/2.1.35: + /mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} dependencies: mime-db: 1.52.0 - /minimatch/3.1.2: + /minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} dependencies: brace-expansion: 1.1.11 dev: true - /ms/2.1.2: + /mlly@1.3.0: + resolution: {integrity: sha512-HT5mcgIQKkOrZecOjOX3DJorTikWXwsBfpcr/MGBkhfWcjiqvnaL/9ppxvIUXfjT6xt4DVIAsN9fMUz1ev4bIw==} + dependencies: + acorn: 8.8.2 + pathe: 1.1.1 + pkg-types: 1.0.3 + ufo: 1.1.2 + dev: true + + /ms@2.1.2: resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} dev: true - /nanoid/3.3.4: + /nanoid@3.3.4: resolution: {integrity: sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - /natural-compare/1.4.0: + /nanoid@3.3.6: + resolution: {integrity: sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + dev: true + + /natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} dev: true - /normalize-path/3.0.0: + /normalize-path@3.0.0: resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} engines: {node: '>=0.10.0'} dev: true - /normalize-wheel-es/1.2.0: + /normalize-wheel-es@1.2.0: resolution: {integrity: sha512-Wj7+EJQ8mSuXr2iWfnujrimU35R2W4FAErEyTmJoJ7ucwTn2hOUSsRehMb5RSYkxXGTM7Y9QpvPmp++w5ftoJw==} dev: false - /nth-check/2.1.1: + /nth-check@2.1.1: resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} dependencies: boolbase: 1.0.0 dev: true - /nwsapi/2.2.1: + /nwsapi@2.2.1: resolution: {integrity: sha512-JYOWTeFoS0Z93587vRJgASD5Ut11fYl5NyihP3KrYBvMe1FRRs6RN7m20SA/16GM4P6hTnZjT+UmDOt38UeXNg==} dev: true - /once/1.4.0: + /once@1.4.0: resolution: {integrity: 
sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} dependencies: wrappy: 1.0.2 dev: true - /optionator/0.8.3: + /optionator@0.8.3: resolution: {integrity: sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==} engines: {node: '>= 0.8.0'} dependencies: @@ -1956,10 +2108,10 @@ packages: levn: 0.3.0 prelude-ls: 1.1.2 type-check: 0.3.2 - word-wrap: 1.2.3 + word-wrap: 1.2.4 dev: true - /optionator/0.9.1: + /optionator@0.9.1: resolution: {integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==} engines: {node: '>= 0.8.0'} dependencies: @@ -1968,73 +2120,84 @@ packages: levn: 0.4.1 prelude-ls: 1.2.1 type-check: 0.4.0 - word-wrap: 1.2.3 + word-wrap: 1.2.4 dev: true - /p-limit/3.1.0: + /p-limit@3.1.0: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} dependencies: yocto-queue: 0.1.0 dev: true - /p-locate/5.0.0: + /p-limit@4.0.0: + resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + yocto-queue: 1.0.0 + dev: true + + /p-locate@5.0.0: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} dependencies: p-limit: 3.1.0 dev: true - /parent-module/1.0.1: + /parent-module@1.0.1: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} dependencies: callsites: 3.1.0 dev: true - /parse5/7.0.0: + /parse5@7.0.0: resolution: {integrity: sha512-y/t8IXSPWTuRZqXc0ajH/UwDj4mnqLEbSttNbThcFhGrZuOyoyvNBO85PBp2jQa55wY9d07PBNjsK8ZP3K5U6g==} dependencies: entities: 4.3.1 dev: true - /path-exists/4.0.0: + /path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} dev: true - /path-is-absolute/1.0.1: + /path-is-absolute@1.0.1: resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} engines: {node: '>=0.10.0'} dev: true - /path-key/3.1.1: + /path-key@3.1.1: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} dev: true - /path-parse/1.0.7: + /path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} dev: true - /path-type/4.0.0: + /path-type@4.0.0: resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} engines: {node: '>=8'} dev: true - /pathval/1.1.1: + /pathe@1.1.1: + resolution: {integrity: sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==} + dev: true + + /pathval@1.1.1: resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} dev: true - /picocolors/1.0.0: + /picocolors@1.0.0: resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} - /picomatch/2.3.1: + /picomatch@2.3.1: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: 
'>=8.6'} dev: true - /pinia-plugin-persistedstate/2.1.1_pinia@2.0.18: + /pinia-plugin-persistedstate@2.1.1(pinia@2.0.18): resolution: {integrity: sha512-HUgsU5IRtM75eAQiIqzT3p1oPEuYH1/B2ipTMU++yE+FV0LkHaBswdKXs0RMWYCmugO8s62oxLTh/N1dLNp+5A==} peerDependencies: pinia: ^2.0.0 @@ -2042,10 +2205,10 @@ packages: pinia: optional: true dependencies: - pinia: 2.0.18_j6bzmzd4ujpabbp5objtwxyjp4 + pinia: 2.0.18(typescript@4.7.4)(vue@3.2.37) dev: false - /pinia/2.0.18_j6bzmzd4ujpabbp5objtwxyjp4: + /pinia@2.0.18(typescript@4.7.4)(vue@3.2.37): resolution: {integrity: sha512-I5MW05UVX6a5Djka136oH3VzYFiZUgeOApBwFjMx6pL91eHtGVlE3adjNUKLgtwGnrxiBRuJ8+4R3LKJKwnyZg==} peerDependencies: '@vue/composition-api': ^1.4.0 @@ -2060,10 +2223,18 @@ packages: '@vue/devtools-api': 6.2.1 typescript: 4.7.4 vue: 3.2.37 - vue-demi: 0.13.8_vue@3.2.37 + vue-demi: 0.13.8(vue@3.2.37) dev: false - /postcss-selector-parser/6.0.10: + /pkg-types@1.0.3: + resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==} + dependencies: + jsonc-parser: 3.2.0 + mlly: 1.3.0 + pathe: 1.1.1 + dev: true + + /postcss-selector-parser@6.0.10: resolution: {integrity: sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==} engines: {node: '>=4'} dependencies: @@ -2071,7 +2242,7 @@ packages: util-deprecate: 1.0.2 dev: true - /postcss/8.4.16: + /postcss@8.4.16: resolution: {integrity: sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==} engines: {node: ^10 || ^12 || >=14} dependencies: @@ -2079,65 +2250,90 @@ packages: picocolors: 1.0.0 source-map-js: 1.0.2 - /prelude-ls/1.1.2: + /postcss@8.4.24: + resolution: {integrity: sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.6 + picocolors: 1.0.0 + source-map-js: 1.0.2 + dev: true + + /prelude-ls@1.1.2: resolution: {integrity: sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==} engines: {node: '>= 0.8.0'} dev: true - /prelude-ls/1.2.1: + /prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} dev: true - /prettier-linter-helpers/1.0.0: + /prettier-linter-helpers@1.0.0: resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==} engines: {node: '>=6.0.0'} dependencies: fast-diff: 1.2.0 dev: true - /prettier/2.7.1: + /prettier@2.7.1: resolution: {integrity: sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==} engines: {node: '>=10.13.0'} hasBin: true dev: true - /psl/1.9.0: + /pretty-format@27.5.1: + resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + dependencies: + ansi-regex: 5.0.1 + ansi-styles: 5.2.0 + react-is: 17.0.2 + dev: true + + /psl@1.9.0: resolution: {integrity: sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==} dev: true - /punycode/2.1.1: + /punycode@2.1.1: resolution: {integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==} engines: {node: '>=6'} dev: true - /queue-microtask/1.2.3: + /querystringify@2.2.0: + resolution: {integrity: 
sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} + dev: true + + /queue-microtask@1.2.3: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} dev: true - /readdirp/3.6.0: + /react-is@17.0.2: + resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} + dev: true + + /readdirp@3.6.0: resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} engines: {node: '>=8.10.0'} dependencies: picomatch: 2.3.1 dev: true - /regexpp/3.2.0: + /regexpp@3.2.0: resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==} engines: {node: '>=8'} dev: true - /require-directory/2.1.1: - resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} - engines: {node: '>=0.10.0'} + /requires-port@1.0.0: + resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} dev: true - /resolve-from/4.0.0: + /resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} dev: true - /resolve/1.22.1: + /resolve@1.22.1: resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} hasBin: true dependencies: @@ -2146,41 +2342,41 @@ packages: supports-preserve-symlinks-flag: 1.0.0 dev: true - /reusify/1.0.4: + /reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} dev: true - /rimraf/3.0.2: + /rimraf@3.0.2: resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} hasBin: true dependencies: glob: 7.2.3 dev: true - /rollup/2.77.3: - resolution: {integrity: sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g==} - engines: {node: '>=10.0.0'} + /rollup@3.24.0: + resolution: {integrity: sha512-OgraHOIg2YpHQTjl0/ymWfFNBEyPucB7lmhXrQUh38qNOegxLapSPFs9sNr0qKR75awW41D93XafoR2QfhBdUQ==} + engines: {node: '>=14.18.0', npm: '>=8.0.0'} hasBin: true optionalDependencies: fsevents: 2.3.2 dev: true - /run-parallel/1.2.0: + /run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} dependencies: queue-microtask: 1.2.3 dev: true - /safe-buffer/5.1.2: + /safe-buffer@5.1.2: resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} dev: true - /safer-buffer/2.1.2: + /safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} dev: true - /sass/1.54.4: + /sass@1.54.4: resolution: {integrity: sha512-3tmF16yvnBwtlPrNBHw/H907j8MlOX8aTBnlNX1yrKx24RKcJGPyLhFUwkoKBKesR3unP93/2z14Ll8NicwQUA==} engines: {node: '>=12.0.0'} hasBin: true @@ -2190,19 +2386,19 @@ packages: source-map-js: 1.0.2 dev: true - /saxes/6.0.0: + /saxes@6.0.0: resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} engines: {node: '>=v12.22.7'} dependencies: xmlchars: 2.2.0 dev: true - /semver/6.3.0: + 
/semver@6.3.0: resolution: {integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==} hasBin: true dev: true - /semver/7.3.7: + /semver@7.3.7: resolution: {integrity: sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==} engines: {node: '>=10'} hasBin: true @@ -2210,76 +2406,85 @@ packages: lru-cache: 6.0.0 dev: true - /shebang-command/2.0.0: + /shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} dependencies: shebang-regex: 3.0.0 dev: true - /shebang-regex/3.0.0: + /shebang-regex@3.0.0: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} dev: true - /signal-exit/3.0.7: - resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + /siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} dev: true - /slash/3.0.0: + /slash@3.0.0: resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} engines: {node: '>=8'} dev: true - /source-map-js/1.0.2: + /source-map-js@1.0.2: resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} engines: {node: '>=0.10.0'} - /source-map/0.6.1: + /source-map@0.6.1: resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} engines: {node: '>=0.10.0'} - /sourcemap-codec/1.4.8: + /sourcemap-codec@1.4.8: resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==} - /string-width/4.2.3: - resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} - engines: {node: '>=8'} - dependencies: - emoji-regex: 8.0.0 - is-fullwidth-code-point: 3.0.0 - strip-ansi: 6.0.1 + /stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + dev: true + + /std-env@3.3.3: + resolution: {integrity: sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==} dev: true - /strip-ansi/6.0.1: + /strip-ansi@6.0.1: resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} engines: {node: '>=8'} dependencies: ansi-regex: 5.0.1 dev: true - /strip-json-comments/3.1.1: + /strip-json-comments@3.1.1: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} dev: true - /supports-color/7.2.0: + /strip-literal@1.0.1: + resolution: {integrity: sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==} + dependencies: + acorn: 8.8.2 + dev: true + + /supports-color@7.2.0: resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} engines: {node: '>=8'} dependencies: has-flag: 4.0.0 dev: true - /supports-preserve-symlinks-flag/1.0.0: + /supports-preserve-symlinks-flag@1.0.0: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: 
{node: '>= 0.4'} dev: true - /symbol-tree/3.2.4: + /swagger-ui-dist@4.19.1: + resolution: {integrity: sha512-n/gFn+R7G/BXWwl5UZLw6F1YgWOlf3zkwGlsPhTMhNtAAolBGKg0JS5b2RKt5NI6/hSopVaSrki2wTIMUDDy2w==} + dev: false + + /symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} dev: true - /test-exclude/6.0.0: + /test-exclude@6.0.0: resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} engines: {node: '>=8'} dependencies: @@ -2288,52 +2493,62 @@ packages: minimatch: 3.1.2 dev: true - /text-table/0.2.0: + /text-table@0.2.0: resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} dev: true - /tinypool/0.2.4: - resolution: {integrity: sha512-Vs3rhkUH6Qq1t5bqtb816oT+HeJTXfwt2cbPH17sWHIYKTotQIFPk3tf2fgqRrVyMDVOc1EnPgzIxfIulXVzwQ==} + /time-zone@1.0.0: + resolution: {integrity: sha512-TIsDdtKo6+XrPtiTm1ssmMngN1sAhyKnTO2kunQWqNPWIVvCm15Wmw4SWInwTVgJ5u/Tr04+8Ei9TNcw4x4ONA==} + engines: {node: '>=4'} + dev: true + + /tinybench@2.5.0: + resolution: {integrity: sha512-kRwSG8Zx4tjF9ZiyH4bhaebu+EDz1BOx9hOigYHlUW4xxI/wKIUQUqo018UlU4ar6ATPBsaMrdbKZ+tmPdohFA==} + dev: true + + /tinypool@0.5.0: + resolution: {integrity: sha512-paHQtnrlS1QZYKF/GnLoOM/DN9fqaGOFbCbxzAhwniySnzl9Ebk8w73/dd34DAhe/obUbPAOldTyYXQZxnPBPQ==} engines: {node: '>=14.0.0'} dev: true - /tinyspy/1.0.0: - resolution: {integrity: sha512-FI5B2QdODQYDRjfuLF+OrJ8bjWRMCXokQPcwKm0W3IzcbUmBNv536cQc7eXGoAuXphZwgx1DFbqImwzz08Fnhw==} + /tinyspy@2.1.1: + resolution: {integrity: sha512-XPJL2uSzcOyBMky6OFrusqWlzfFrXtE0hPuMgW8A2HmaqrPo4ZQHRN/V0QXN3FSjKxpsbRrFc5LI7KOwBsT1/w==} engines: {node: '>=14.0.0'} dev: true - /to-fast-properties/2.0.0: + /to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} - /to-regex-range/5.0.1: + /to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} dependencies: is-number: 7.0.0 dev: true - /tough-cookie/4.0.0: - resolution: {integrity: sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==} + /tough-cookie@4.1.3: + resolution: {integrity: sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==} engines: {node: '>=6'} dependencies: psl: 1.9.0 punycode: 2.1.1 - universalify: 0.1.2 + universalify: 0.2.0 + url-parse: 1.5.10 dev: true - /tr46/3.0.0: + /tr46@3.0.0: resolution: {integrity: sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==} engines: {node: '>=12'} dependencies: punycode: 2.1.1 dev: true - /tslib/1.14.1: + /tslib@1.14.1: resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==} dev: true - /tsutils/3.21.0_typescript@4.7.4: + /tsutils@3.21.0(typescript@4.7.4): resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} engines: {node: '>= 6'} peerDependencies: @@ -2343,56 +2558,67 @@ packages: typescript: 4.7.4 dev: true - /type-check/0.3.2: + /type-check@0.3.2: resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} engines: {node: '>= 0.8.0'} dependencies: prelude-ls: 
1.1.2 dev: true - /type-check/0.4.0: + /type-check@0.4.0: resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} engines: {node: '>= 0.8.0'} dependencies: prelude-ls: 1.2.1 dev: true - /type-detect/4.0.8: + /type-detect@4.0.8: resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} engines: {node: '>=4'} dev: true - /type-fest/0.20.2: + /type-fest@0.20.2: resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} engines: {node: '>=10'} dev: true - /typescript/4.7.4: + /typescript@4.7.4: resolution: {integrity: sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==} engines: {node: '>=4.2.0'} hasBin: true - /universalify/0.1.2: - resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} + /ufo@1.1.2: + resolution: {integrity: sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==} + dev: true + + /universalify@0.2.0: + resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} engines: {node: '>= 4.0.0'} dev: true - /uri-js/4.4.1: + /uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} dependencies: punycode: 2.1.1 dev: true - /util-deprecate/1.0.2: + /url-parse@1.5.10: + resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} + dependencies: + querystringify: 2.2.0 + requires-port: 1.0.0 + dev: true + + /util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} dev: true - /v8-compile-cache/2.3.0: + /v8-compile-cache@2.3.0: resolution: {integrity: sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==} dev: true - /v8-to-istanbul/9.0.1: - resolution: {integrity: sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==} + /v8-to-istanbul@9.1.0: + resolution: {integrity: sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==} engines: {node: '>=10.12.0'} dependencies: '@jridgewell/trace-mapping': 0.3.15 @@ -2400,37 +2626,65 @@ packages: convert-source-map: 1.8.0 dev: true - /vite/3.0.8_sass@1.54.4: - resolution: {integrity: sha512-AOZ4eN7mrkJiOLuw8IA7piS4IdOQyQCA81GxGsAQvAZzMRi9ZwGB3TOaYsj4uLAWK46T5L4AfQ6InNGlxX30IQ==} + /vite-node@0.32.0(@types/node@18.7.6)(sass@1.54.4): + resolution: {integrity: sha512-220P/y8YacYAU+daOAqiGEFXx2A8AwjadDzQqos6wSukjvvTWNqleJSwoUn0ckyNdjHIKoxn93Nh1vWBqEKr3Q==} + engines: {node: '>=v14.18.0'} + hasBin: true + dependencies: + cac: 6.7.14 + debug: 4.3.4 + mlly: 1.3.0 + pathe: 1.1.1 + picocolors: 1.0.0 + vite: 4.2.3(@types/node@18.7.6)(sass@1.54.4) + transitivePeerDependencies: + - '@types/node' + - less + - sass + - stylus + - sugarss + - supports-color + - terser + dev: true + + /vite@4.2.3(@types/node@18.7.6)(sass@1.54.4): + resolution: {integrity: sha512-kLU+m2q0Y434Y1kCy3TchefAdtFso0ILi0dLyFV8Us3InXTU11H/B5ZTqCKIQHzSKNxVG/yEx813EA9f1imQ9A==} engines: {node: ^14.18.0 || >=16.0.0} hasBin: true peerDependencies: + '@types/node': '>= 14' less: '*' sass: '*' stylus: '*' + sugarss: '*' terser: ^5.4.0 peerDependenciesMeta: + 
'@types/node': + optional: true less: optional: true sass: optional: true stylus: optional: true + sugarss: + optional: true terser: optional: true dependencies: - esbuild: 0.14.54 - postcss: 8.4.16 + '@types/node': 18.7.6 + esbuild: 0.17.19 + postcss: 8.4.24 resolve: 1.22.1 - rollup: 2.77.3 + rollup: 3.24.0 sass: 1.54.4 optionalDependencies: fsevents: 2.3.2 dev: true - /vitest/0.22.0_jsdom@20.0.0+sass@1.54.4: - resolution: {integrity: sha512-BSIro/QOHLaQY08FHwT6THWhqLQ+VPU+N4Rdo4pcP+16XB6oLmNNAXGcSh/MOLUhfUy+mqCwx7AyKmU7Ms5R+g==} - engines: {node: '>=v14.16.0'} + /vitest@0.32.0(jsdom@20.0.0)(sass@1.54.4): + resolution: {integrity: sha512-SW83o629gCqnV3BqBnTxhB10DAwzwEx3z+rqYZESehUB+eWsJxwcBQx7CKy0otuGMJTYh7qCVuUX23HkftGl/Q==} + engines: {node: '>=v14.18.0'} hasBin: true peerDependencies: '@edge-runtime/vm': '*' @@ -2438,6 +2692,9 @@ packages: '@vitest/ui': '*' happy-dom: '*' jsdom: '*' + playwright: '*' + safaridriver: '*' + webdriverio: '*' peerDependenciesMeta: '@edge-runtime/vm': optional: true @@ -2449,26 +2706,49 @@ packages: optional: true jsdom: optional: true + playwright: + optional: true + safaridriver: + optional: true + webdriverio: + optional: true dependencies: - '@types/chai': 4.3.3 + '@types/chai': 4.3.5 '@types/chai-subset': 1.3.3 '@types/node': 18.7.6 - chai: 4.3.6 + '@vitest/expect': 0.32.0 + '@vitest/runner': 0.32.0 + '@vitest/snapshot': 0.32.0 + '@vitest/spy': 0.32.0 + '@vitest/utils': 0.32.0 + acorn: 8.8.2 + acorn-walk: 8.2.0 + cac: 6.7.14 + chai: 4.3.7 + concordance: 5.0.4 debug: 4.3.4 jsdom: 20.0.0 - local-pkg: 0.4.2 - tinypool: 0.2.4 - tinyspy: 1.0.0 - vite: 3.0.8_sass@1.54.4 + local-pkg: 0.4.3 + magic-string: 0.30.0 + pathe: 1.1.1 + picocolors: 1.0.0 + std-env: 3.3.3 + strip-literal: 1.0.1 + tinybench: 2.5.0 + tinypool: 0.5.0 + vite: 4.2.3(@types/node@18.7.6)(sass@1.54.4) + vite-node: 0.32.0(@types/node@18.7.6)(sass@1.54.4) + why-is-node-running: 2.2.2 transitivePeerDependencies: - less - sass - stylus + - sugarss - supports-color - terser dev: true - /vue-demi/0.13.8_vue@3.2.37: + /vue-demi@0.13.8(vue@3.2.37): resolution: {integrity: sha512-Vy1zbZhCOdsmvGR6tJhAvO5vhP7eiS8xkbYQSoVa7o6KlIy3W8Rc53ED4qI4qpeRDjv3mLfXSEpYU6Yq4pgXRg==} engines: {node: '>=12'} hasBin: true @@ -2483,7 +2763,7 @@ packages: vue: 3.2.37 dev: false - /vue-eslint-parser/9.0.3_eslint@8.22.0: + /vue-eslint-parser@9.0.3(eslint@8.22.0): resolution: {integrity: sha512-yL+ZDb+9T0ELG4VIFo/2anAOz8SvBdlqEnQnvJ3M7Scq56DvtjY0VY88bByRZB0D4J0u8olBcfrXTVONXsh4og==} engines: {node: ^14.17.0 || >=16.0.0} peerDependencies: @@ -2501,7 +2781,7 @@ packages: - supports-color dev: true - /vue-i18n/9.2.2_vue@3.2.37: + /vue-i18n@9.2.2(vue@3.2.37): resolution: {integrity: sha512-yswpwtj89rTBhegUAv9Mu37LNznyu3NpyLQmozF3i1hYOhwpG8RjcjIFIIfnu+2MDZJGSZPXaKWvnQA71Yv9TQ==} engines: {node: '>= 14'} peerDependencies: @@ -2514,7 +2794,7 @@ packages: vue: 3.2.37 dev: false - /vue-router/4.1.3_vue@3.2.37: + /vue-router@4.1.3(vue@3.2.37): resolution: {integrity: sha512-XvK81bcYglKiayT7/vYAg/f36ExPC4t90R/HIpzrZ5x+17BOWptXLCrEPufGgZeuq68ww4ekSIMBZY1qdUdfjA==} peerDependencies: vue: ^3.2.0 @@ -2523,7 +2803,7 @@ packages: vue: 3.2.37 dev: false - /vue-tsc/0.38.9_typescript@4.7.4: + /vue-tsc@0.38.9(typescript@4.7.4): resolution: {integrity: sha512-Yoy5phgvGqyF98Fb4mYqboR4Q149jrdcGv5kSmufXJUq++RZJ2iMVG0g6zl+v3t4ORVWkQmRpsV4x2szufZ0LQ==} hasBin: true peerDependencies: @@ -2533,46 +2813,51 @@ packages: typescript: 4.7.4 dev: true - /vue/3.2.37: + /vue@3.2.37: resolution: {integrity: 
sha512-bOKEZxrm8Eh+fveCqS1/NkG/n6aMidsI6hahas7pa0w/l7jkbssJVsRhVDs07IdDq7h9KHswZOgItnwJAgtVtQ==} dependencies: '@vue/compiler-dom': 3.2.37 '@vue/compiler-sfc': 3.2.37 '@vue/runtime-dom': 3.2.37 - '@vue/server-renderer': 3.2.37_vue@3.2.37 + '@vue/server-renderer': 3.2.37(vue@3.2.37) '@vue/shared': 3.2.37 - /w3c-hr-time/1.0.2: + /w3c-hr-time@1.0.2: resolution: {integrity: sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==} dependencies: browser-process-hrtime: 1.0.0 dev: true - /w3c-xmlserializer/3.0.0: + /w3c-xmlserializer@3.0.0: resolution: {integrity: sha512-3WFqGEgSXIyGhOmAFtlicJNMjEps8b1MG31NCA0/vOF9+nKMUW1ckhi9cnNHmf88Rzw5V+dwIwsm2C7X8k9aQg==} engines: {node: '>=12'} dependencies: xml-name-validator: 4.0.0 dev: true - /webidl-conversions/7.0.0: + /webidl-conversions@7.0.0: resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} engines: {node: '>=12'} dev: true - /whatwg-encoding/2.0.0: + /well-known-symbols@2.0.0: + resolution: {integrity: sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q==} + engines: {node: '>=6'} + dev: true + + /whatwg-encoding@2.0.0: resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==} engines: {node: '>=12'} dependencies: iconv-lite: 0.6.3 dev: true - /whatwg-mimetype/3.0.0: + /whatwg-mimetype@3.0.0: resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} engines: {node: '>=12'} dev: true - /whatwg-url/11.0.0: + /whatwg-url@11.0.0: resolution: {integrity: sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==} engines: {node: '>=12'} dependencies: @@ -2580,7 +2865,7 @@ packages: webidl-conversions: 7.0.0 dev: true - /which/2.0.2: + /which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} hasBin: true @@ -2588,25 +2873,25 @@ packages: isexe: 2.0.0 dev: true - /word-wrap/1.2.3: - resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==} - engines: {node: '>=0.10.0'} + /why-is-node-running@2.2.2: + resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + engines: {node: '>=8'} + hasBin: true + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 dev: true - /wrap-ansi/7.0.0: - resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} - engines: {node: '>=10'} - dependencies: - ansi-styles: 4.3.0 - string-width: 4.2.3 - strip-ansi: 6.0.1 + /word-wrap@1.2.4: + resolution: {integrity: sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==} + engines: {node: '>=0.10.0'} dev: true - /wrappy/1.0.2: + /wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} dev: true - /ws/8.8.1: + /ws@8.8.1: resolution: {integrity: sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==} engines: {node: '>=10.0.0'} peerDependencies: @@ -2619,43 +2904,25 @@ packages: optional: true dev: true - /xml-name-validator/4.0.0: + /xml-name-validator@4.0.0: resolution: {integrity: 
sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} engines: {node: '>=12'} dev: true - /xmlchars/2.2.0: + /xmlchars@2.2.0: resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} dev: true - /y18n/5.0.8: - resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} - engines: {node: '>=10'} - dev: true - - /yallist/4.0.0: + /yallist@4.0.0: resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} dev: true - /yargs-parser/20.2.9: - resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} - engines: {node: '>=10'} - dev: true - - /yargs/16.2.0: - resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==} + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} - dependencies: - cliui: 7.0.4 - escalade: 3.1.1 - get-caller-file: 2.0.5 - require-directory: 2.1.1 - string-width: 4.2.3 - y18n: 5.0.8 - yargs-parser: 20.2.9 dev: true - /yocto-queue/0.1.0: - resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} - engines: {node: '>=10'} + /yocto-queue@1.0.0: + resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + engines: {node: '>=12.20'} dev: true diff --git a/kyuubi-server/web-ui/src/api/engine/index.ts b/kyuubi-server/web-ui/src/api/engine/index.ts new file mode 100644 index 00000000000..ff6dc038da5 --- /dev/null +++ b/kyuubi-server/web-ui/src/api/engine/index.ts @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import request from '@/utils/request' +import { IEngineSearch } from './types' + +export function getAllEngines(params: IEngineSearch) { + return request({ + url: 'api/v1/admin/engine', + method: 'get', + params + }) +} + +export function deleteEngine(params: IEngineSearch) { + return request({ + url: 'api/v1/admin/engine', + method: 'delete', + params + }) +} diff --git a/kyuubi-server/web-ui/src/api/engine/types.ts b/kyuubi-server/web-ui/src/api/engine/types.ts new file mode 100644 index 00000000000..86a05dd292c --- /dev/null +++ b/kyuubi-server/web-ui/src/api/engine/types.ts @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface IEngineSearch {
+  type: null | string
+  sharelevel: null | string
+  'hive.server2.proxy.user': null | string
+  subdomain?: null | string
+}
+
+export { IEngineSearch }
diff --git a/kyuubi-server/web-ui/src/api/operation/index.ts b/kyuubi-server/web-ui/src/api/operation/index.ts
new file mode 100644
index 00000000000..51a3b5394d5
--- /dev/null
+++ b/kyuubi-server/web-ui/src/api/operation/index.ts
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import request from '@/utils/request'
+
+export function getAllOperations() {
+  return request({
+    url: 'api/v1/admin/operations',
+    method: 'get'
+  })
+}
+
+export function actionOnOperation(
+  operationId: string,
+  data: {
+    action: 'CANCEL' | 'CLOSE'
+  }
+) {
+  return request({
+    url: `api/v1/operations/${operationId}`,
+    method: 'put',
+    data
+  })
+}
diff --git a/kyuubi-server/web-ui/src/api/server/index.ts b/kyuubi-server/web-ui/src/api/server/index.ts
new file mode 100644
index 00000000000..e2d74d7dbaf
--- /dev/null
+++ b/kyuubi-server/web-ui/src/api/server/index.ts
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import request from '@/utils/request'
+
+export function getAllServer() {
+  return request({
+    url: 'api/v1/admin/server',
+    method: 'get'
+  })
+}
diff --git a/kyuubi-server/web-ui/src/api/session/index.ts b/kyuubi-server/web-ui/src/api/session/index.ts
new file mode 100644
index 00000000000..fa4759b363a
--- /dev/null
+++ b/kyuubi-server/web-ui/src/api/session/index.ts
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import request from '@/utils/request'
+
+export function getAllSessions() {
+  return request({
+    url: 'api/v1/admin/sessions',
+    method: 'get'
+  })
+}
+
+export function deleteSession(sessionId: string) {
+  return request({
+    url: `api/v1/admin/sessions/${sessionId}`,
+    method: 'delete'
+  })
+}
+
+export function getSession(sessionId: string) {
+  return request({
+    url: `api/v1/sessions/${sessionId}`,
+    method: 'get'
+  })
+}
+
+export function getAllTypeOperation(sessionId: string) {
+  return request({
+    url: `api/v1/sessions/${sessionId}/operations`,
+    method: 'get'
+  })
+}
diff --git a/kyuubi-server/web-ui/src/components/menu/index.vue b/kyuubi-server/web-ui/src/components/menu/index.vue
index b563b491ec8..d6d4d1b56f1 100644
--- a/kyuubi-server/web-ui/src/components/menu/index.vue
+++ b/kyuubi-server/web-ui/src/components/menu/index.vue
@@ -21,14 +21,12 @@
     class="el-menu-container"
     :collapse="isCollapse"
     :default-active="activePath"
-    :router="true"
-  >
+    :router="true">
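The new src/api modules above (engine, operation, server, session) are thin typed wrappers over Kyuubi's admin REST endpoints (api/v1/admin/engine, api/v1/admin/operations, api/v1/admin/server, api/v1/admin/sessions). A minimal usage sketch follows; it is not part of the patch, it assumes the same '@' path alias and axios-style `request` helper the modules rely on, and the engine type, share level, and proxy user values are illustrative only.

// Illustrative sketch, not part of this patch: combining the new API helpers
// from a view component. Assumes `request` resolves with an axios-style
// response object whose parsed body is in `.data`.
import { getAllEngines } from '@/api/engine'
import { getAllSessions } from '@/api/session'
import type { IEngineSearch } from '@/api/engine/types'

async function refreshAdminView() {
  // Query engines registered for one proxy user; deleteEngine() accepts the same shape.
  const params: IEngineSearch = {
    type: 'SPARK_SQL',                      // example engine type
    sharelevel: 'USER',                     // example share level
    'hive.server2.proxy.user': 'anonymous'  // example proxy user
  }
  const engines = await getAllEngines(params)

  // Fetch every session currently tracked by the server.
  const sessions = await getAllSessions()

  return { engines: engines.data, sessions: sessions.data }
}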